/*
 *	linux/arch/alpha/kernel/smp.c
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>

#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
#include <asm/mmu_context.h>

#define __KERNEL_SYSCALLS__
#include <asm/unistd.h>

#define DBGS(args)	printk args

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];

/* A collection of single bit ipi messages.  */
static struct {
        unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
};

spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;

/* Set to a secondary's cpuid when it comes online.  */
static unsigned long smp_secondary_alive;

/* Which cpu ids came online.  */
unsigned long cpu_present_mask;

/* cpus reported in the hwrpb */
static unsigned long hwrpb_cpu_present_mask __initdata = 0;

static int max_cpus = -1;	/* Command-line limitation.  */
int smp_num_probed;		/* Internal processor count.  */
int smp_num_cpus = 1;		/* Number that came online.  */
int smp_threads_ready;		/* True once the per-processor idle is forked.  */
cycles_t cacheflush_time;

int __cpu_number_map[NR_CPUS];
int __cpu_logical_map[NR_CPUS];

extern void calibrate_delay(void);
extern asmlinkage void entInt(void);

static int __init nosmp(char *str)
{
        max_cpus = 0;
        return 1;
}

__setup("nosmp", nosmp);

static int __init maxcpus(char *str)
{
        get_option(&str, &max_cpus);
        return 1;
}

__setup("maxcpus", maxcpus);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
        cpu_data[cpuid].loops_per_sec = loops_per_sec;
        cpu_data[cpuid].last_asn
          = (cpuid << WIDTH_HARDWARE_ASN) + ASN_FIRST_VERSION;
        local_irq_count(cpuid) = 0;
        local_bh_count(cpuid) = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
        cpu_data[cpuid].prof_counter = 1;
        cpu_data[cpuid].prof_multiplier = 1;
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
        int cpuid = hard_smp_processor_id();

        DBGS(("CALLIN %d state 0x%lx\n", cpuid, current->state));

        /* Turn on machine checks.  */
        wrmces(7);

        /* Set trap vectors.  */
        trap_init();

        /* Set interrupt vector.  */
        wrent(entInt, 0);

        /* Setup the scheduler for this processor.  */
        init_idle();

        /* ??? This should be in init_idle.  */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        /* Get our local ticker going.  */
        smp_setup_percpu_timer(cpuid);

        /* Must have completely accurate bogos.  */
        __sti();
        calibrate_delay();
        smp_store_cpu_info(cpuid);

        /* Allow master to continue.  */
        wmb();
        smp_secondary_alive = cpuid;

        /* Wait for the go code.  */
        while (!smp_threads_ready)
                barrier();

        DBGS(("smp_callin: commencing CPU %d current %p\n",
              cpuid, current));

        cpu_idle();
}

/*
 * Rough estimation for SMP scheduling, this is the number of cycles it
 * takes for a fully memory-limited process to flush the SMP-local cache.
 *
 * We are not told how much cache there is, so we have to guess.
 */
static void __init
smp_tune_scheduling (void)
{
        struct percpu_struct *cpu;
        unsigned long on_chip_cache;
        unsigned long freq;

        cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
        switch (cpu->type)
        {
        case EV45_CPU:
                on_chip_cache = 16 + 16;
                break;

        case EV5_CPU:
        case EV56_CPU:
                on_chip_cache = 8 + 8 + 96;
                break;

        case PCA56_CPU:
                on_chip_cache = 16 + 8;
                break;

        case EV6_CPU:
                on_chip_cache = 64 + 64;
                break;

        default:
                on_chip_cache = 8 + 8;
                break;
        }

        freq = hwrpb->cycle_freq ? : est_cycle_freq;

#if 0
        /* Magic estimation stolen from x86 port.  */
        cacheflush_time = freq / 1024L * on_chip_cache / 5000L;

        printk("Using heuristic of %d cycles.\n",
               cacheflush_time);
#else
        /* Magic value to force potential preemption of other CPUs.  */
        cacheflush_time = INT_MAX;

        printk("Using heuristic of %d cycles.\n",
               cacheflush_time);
#endif
}

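/*
 * For a feel of the disabled x86-style heuristic above (illustrative
 * arithmetic only, not taken from this file): a 500 MHz part with
 * on_chip_cache = 8 + 8 + 96 = 112 KB would give
 * 500000000 / 1024 * 112 / 5000 ~= 10900 cycles, i.e. roughly 22 us at
 * that clock rate.  The INT_MAX value actually used instead simply tells
 * the scheduler to treat every cross-CPU migration as cache-expensive.
 */
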
/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void
send_secondary_console_msg(char *str, int cpuid)
{
        struct percpu_struct *cpu;
        register char *cp1, *cp2;
        unsigned long cpumask;
        size_t len;
        long timeout;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);

        cpumask = (1L << cpuid);
        if (hwrpb->txrdy & cpumask)
                goto delay1;
ready1:

        cp2 = str;
        len = strlen(cp2);
        *(unsigned int *)&cpu->ipc_buffer[0] = len;
        cp1 = (char *) &cpu->ipc_buffer[1];
        memcpy(cp1, cp2, len);

        /* atomic test and set */
        wmb();
        set_bit(cpuid, &hwrpb->rxrdy);

        if (hwrpb->txrdy & cpumask)
                goto delay2;
ready2:
        return;

delay1:
        /* Wait one second.  Note that jiffies aren't ticking yet.  */
        for (timeout = 100000; timeout > 0; --timeout) {
                if (!(hwrpb->txrdy & cpumask))
                        goto ready1;
                udelay(10);
                barrier();
        }
        goto timeout;

delay2:
        /* Wait one second.  */
        for (timeout = 100000; timeout > 0; --timeout) {
                if (!(hwrpb->txrdy & cpumask))
                        goto ready2;
                udelay(10);
                barrier();
        }
        goto timeout;

timeout:
        printk("Processor %x not ready\n", cpuid);
        return;
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
        int mycpu, i, cnt;
        unsigned long txrdy = hwrpb->txrdy;
        char *cp1, *cp2, buf[80];
        struct percpu_struct *cpu;

        DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

        mycpu = hard_smp_processor_id();

        for (i = 0; i < NR_CPUS; i++) {
                if (!(txrdy & (1L << i)))
                        continue;

                DBGS(("recv_secondary_console_msg: "
                      "TXRDY contains CPU %d.\n", i));

                cpu = (struct percpu_struct *)
                  ((char*)hwrpb
                   + hwrpb->processor_offset
                   + i * hwrpb->processor_size);

                DBGS(("recv_secondary_console_msg: on %d from %d"
                      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
                      mycpu, i, cpu->halt_reason, cpu->flags));

                cnt = cpu->ipc_buffer[0] >> 32;
                if (cnt <= 0 || cnt >= 80)
                        strcpy(buf, "<<< BOGUS MSG >>>");
                else {
                        cp1 = (char *) &cpu->ipc_buffer[11];
                        cp2 = buf;
                        strcpy(cp2, cp1);

                        while ((cp2 = strchr(cp2, '\r')) != 0) {
                                *cp2 = ' ';
                                if (cp2[1] == '\n')
                                        cp2[1] = ' ';
                        }
                }

                DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
                      "message is '%s'\n", mycpu, buf));
        }

        hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
        struct percpu_struct *cpu;
        struct pcb_struct *hwpcb;
        long timeout;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);
        hwpcb = (struct pcb_struct *) cpu->hwpcb;

        /* Initialize the CPU's HWPCB to something just good enough for
           us to get started.  Immediately after starting, we'll swpctx
           to the target idle task's ptb.  Reuse the stack in the mean
           time.  Precalculate the target PCBB.  */
        hwpcb->ksp = (unsigned long) idle + sizeof(union task_union) - 16;
        hwpcb->usp = 0;
        hwpcb->ptbr = idle->thread.ptbr;
        hwpcb->pcc = 0;
        hwpcb->asn = 0;
        hwpcb->unique = virt_to_phys(&idle->thread);
        hwpcb->flags = idle->thread.pal_flags;
        hwpcb->res1 = hwpcb->res2 = 0;

        DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
              hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
        DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
              cpuid, idle->state, idle->thread.pal_flags));

        /* Setup HWRPB fields that SRM uses to activate secondary CPU */
        hwrpb->CPU_restart = __smp_callin;
        hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

        /* Recalculate and update the HWRPB checksum */
        hwrpb_update_checksum(hwrpb);

        /*
         * Send a "start" command to the specified processor.
         */

        /* SRM III 3.4.1.3 */
        cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
        cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
        wmb();

        send_secondary_console_msg("START\r\n", cpuid);

        /* Wait 1 second for an ACK from the console.  Note that jiffies
           aren't ticking yet.  */
        for (timeout = 100000; timeout > 0; timeout--) {
                if (cpu->flags & 1)
                        goto started;
                udelay(10);
                barrier();
        }
        printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
        return -1;

started:
        DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
        return 0;
}

static int __init fork_by_hand(void)
{
        struct pt_regs regs;
        /*
         * Don't care about the regs settings since
         * we'll never reschedule the forked task.
         */
        return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
}

/*
 * Bring one cpu online.
 */
static int __init
smp_boot_one_cpu(int cpuid, int cpunum)
{
        struct task_struct *idle;
        long timeout;

        /* Cook up an idler for this guy.  Note that the address we give
           to kernel_thread is irrelevant -- it's going to start where
           HWRPB.CPU_restart says to start.  But this gets all the other
           task-y sort of data structures set up like we wish.  */
        /*
         * We can't use kernel_thread since we must avoid
         * rescheduling the child.
         */
        if (fork_by_hand() < 0)
                panic("failed fork for CPU %d", cpuid);

        idle = init_task.prev_task;
        if (!idle)
                panic("No idle process for CPU %d", cpuid);

        idle->processor = cpuid;
        __cpu_logical_map[cpunum] = cpuid;
        __cpu_number_map[cpuid] = cpunum;
        idle->has_cpu = 1;	/* we schedule the first task manually */

        del_from_runqueue(idle);
        unhash_process(idle);
        init_tasks[cpunum] = idle;

        DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
              cpuid, idle->state, idle->flags));

        /* The secondary will change this once it is happy.  Note that
           secondary_cpu_start contains the necessary memory barrier.  */
        smp_secondary_alive = -1;

        /* Whirrr, whirrr, whirrrrrrrrr... */
        if (secondary_cpu_start(cpuid, idle))
                return -1;

        /* We've been acked by the console; wait one second for the task
           to start up for real.  Note that jiffies aren't ticking yet.  */
        for (timeout = 0; timeout < 100000; timeout++) {
                if (smp_secondary_alive != -1)
                        goto alive;
                udelay(10);
                barrier();
        }

        /* We must invalidate our stuff as we failed to boot the CPU.  */
        __cpu_logical_map[cpunum] = -1;
        __cpu_number_map[cpuid] = -1;

        /* The idle task is local to us so free it as we don't use it.  */
        free_task_struct(idle);

        printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
        return -1;

alive:
        /* Another "Red Snapper". */
        return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
        struct percpu_struct *cpubase, *cpu;
        int i;

        if (boot_cpuid != 0) {
                printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
                       boot_cpuid);
        }

        if (hwrpb->nr_processors > 1) {
                int boot_cpu_palrev;

                DBGS(("setup_smp: nr_processors %ld\n",
                      hwrpb->nr_processors));

                cpubase = (struct percpu_struct *)
                        ((char*)hwrpb + hwrpb->processor_offset);
                boot_cpu_palrev = cpubase->pal_revision;

                for (i = 0; i < hwrpb->nr_processors; i++ ) {
                        cpu = (struct percpu_struct *)
                                ((char *)cpubase + i*hwrpb->processor_size);
                        if ((cpu->flags & 0x1cc) == 0x1cc) {
                                smp_num_probed++;
                                /* Assume here that "whami" == index */
                                hwrpb_cpu_present_mask |= (1L << i);
                                cpu->pal_revision = boot_cpu_palrev;
                        }

                        DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
                              i, cpu->flags, cpu->type));
                        DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
                              i, cpu->pal_revision));
                }
        } else {
                smp_num_probed = 1;
                hwrpb_cpu_present_mask = (1L << boot_cpuid);
        }
        cpu_present_mask = 1L << boot_cpuid;

        printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
               smp_num_probed, hwrpb_cpu_present_mask);
}

/*
 * Called by smp_init to bring all the secondaries online and hold them.
 */
void __init
smp_boot_cpus(void)
{
        int cpu_count, i;
        unsigned long bogosum;

        /* Take care of some initial bookkeeping.  */
        memset(__cpu_number_map, -1, sizeof(__cpu_number_map));
        memset(__cpu_logical_map, -1, sizeof(__cpu_logical_map));
        memset(ipi_data, 0, sizeof(ipi_data));

        __cpu_number_map[boot_cpuid] = 0;
        __cpu_logical_map[0] = boot_cpuid;
        current->processor = boot_cpuid;

        smp_store_cpu_info(boot_cpuid);
        smp_tune_scheduling();
        smp_setup_percpu_timer(boot_cpuid);

        init_idle();

        /* ??? This should be in init_idle.  */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        /* Nothing to do on a UP box, or when told not to.  */
        if (smp_num_probed == 1 || max_cpus == 0) {
                printk(KERN_INFO "SMP mode deactivated.\n");
                return;
        }

        printk(KERN_INFO "SMP starting up secondaries.\n");

        cpu_count = 1;
        for (i = 0; i < NR_CPUS; i++) {
                if (i == boot_cpuid)
                        continue;

                if (((hwrpb_cpu_present_mask >> i) & 1) == 0)
                        continue;

                if (smp_boot_one_cpu(i, cpu_count))
                        continue;

                cpu_present_mask |= 1L << i;
                cpu_count++;
        }

        if (cpu_count == 1) {
                printk(KERN_ERR "SMP: Only one lonely processor alive.\n");
                return;
        }

        bogosum = 0;
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_present_mask & (1L << i))
                        bogosum += cpu_data[i].loops_per_sec;
        }
        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               cpu_count, (bogosum + 2500) / 500000,
               ((bogosum + 2500) / 5000) % 100);

        smp_num_cpus = cpu_count;
}

/*
 * Called by smp_init to release the blocking online cpus once they
 * are all started.
 */
void __init
smp_commence(void)
{
        /* smp_init sets smp_threads_ready -- that's enough.  */
        mb();
}

void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
        int cpu = smp_processor_id();
        unsigned long user = user_mode(regs);
        struct cpuinfo_alpha *data = &cpu_data[cpu];

        /* Record kernel PC.  */
        if (!user)
                alpha_do_profile(regs->pc);

        if (!--data->prof_counter) {
                /* We need to make like a normal interrupt -- otherwise
                   timer interrupts ignore the global interrupt lock,
                   which would be a Bad Thing.  */
                irq_enter(cpu, RTC_IRQ);

                update_process_times(user);

                data->prof_counter = data->prof_multiplier;
                irq_exit(cpu, RTC_IRQ);
        }
}

int
setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

static void
send_ipi_message(unsigned long to_whom, enum ipi_message_type operation)
{
        unsigned long i, j;

        /* Reduce the number of memory barriers by doing two loops:
           one to set the bits, one to invoke the interrupts.  */

        mb();	/* Order out-of-band data and bit setting. */

        for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
                if (to_whom & j)
                        set_bit(operation, &ipi_data[i].bits);
        }

        mb();	/* Order bit setting and interrupt. */

        for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
                if (to_whom & j)
                        wripir(i);
        }
}

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
        void (*func) (void *info);
        void *info;
        long wait;
        atomic_t unstarted_count;
        atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The pointer is free if
   it initially contains zero.  If retry, spin until free.  */

static inline int
pointer_lock (void *lock, void *data, int retry)
{
        void *old, *tmp;

        mb();
again:
        /* Compare and swap with zero.  */
        asm volatile (
        "1:     ldq_l   %0,%1\n"
        "       mov     %3,%2\n"
        "       bne     %0,2f\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,1b\n"
        "2:"
        : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
        : "r"(data), "m"(*(void **)lock));

        if (old == 0)
                return 0;
        if (!retry)
                return -EBUSY;

        while (*(void **)lock)
                barrier();
        goto again;
}

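/*
 * The LL/SC sequence above amounts to a compare-and-swap with zero.  As a
 * rough, illustrative sketch only (not part of the original file; the
 * helper name is hypothetical and the __sync builtin is a modern GCC
 * primitive, not something available to this code), the same idea reads:
 */
#if 0
static inline int pointer_lock_sketch(void **lock, void *data, int retry)
{
        for (;;) {
                /* Claim the pointer only if it currently holds NULL.  */
                if (__sync_bool_compare_and_swap(lock, NULL, data))
                        return 0;
                if (!retry)
                        return -EBUSY;
                /* Spin until the current holder clears the pointer.  */
                while (*lock)
                        barrier();
        }
}
#endif
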
void
handle_ipi(struct pt_regs *regs)
{
        int this_cpu = smp_processor_id();
        unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
        unsigned long ops;

        DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
              this_cpu, *pending_ipis, regs->pc));

        mb();	/* Order interrupt and bit testing. */
        while ((ops = xchg(pending_ipis, 0)) != 0) {
          mb();	/* Order bit clearing and data access. */
          do {
                unsigned long which;

                which = ops & -ops;
                ops &= ~which;
                which = ffz(~which);

                if (which == IPI_RESCHEDULE) {
                        /* Reschedule callback.  Everything to be done
                           is done by the interrupt return path.  */
                }
                else if (which == IPI_CALL_FUNC) {
                        struct smp_call_struct *data;
                        void (*func)(void *info);
                        void *info;
                        int wait;

                        data = smp_call_function_data;
                        func = data->func;
                        info = data->info;
                        wait = data->wait;

                        /* Notify the sending CPU that the data has been
                           received, and execution is about to begin.  */
                        mb();
                        atomic_dec (&data->unstarted_count);

                        /* At this point the structure may be gone unless
                           wait is true.  */
                        (*func)(info);

                        /* Notify the sending CPU that the task is done.  */
                        mb();
                        if (wait) atomic_dec (&data->unfinished_count);
                }
                else if (which == IPI_CPU_STOP) {
                        halt();
                }
                else {
                        printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
                               this_cpu, which);
                }
          } while (ops);

          mb();	/* Order data access and bit testing. */
        }

        cpu_data[this_cpu].ipi_count++;

        if (hwrpb->txrdy)
                recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
        if (cpu == hard_smp_processor_id())
                printk(KERN_WARNING
                       "smp_send_reschedule: Sending IPI to self.\n");
        send_ipi_message(1L << cpu, IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
        unsigned long to_whom = cpu_present_mask ^ (1L << smp_processor_id());

        if (hard_smp_processor_id() != boot_cpuid)
                printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");

        send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 */
int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
        unsigned long to_whom = cpu_present_mask ^ (1L << smp_processor_id());
        struct smp_call_struct data;
        long timeout;

        data.func = func;
        data.info = info;
        data.wait = wait;
        atomic_set(&data.unstarted_count, smp_num_cpus - 1);
        atomic_set(&data.unfinished_count, smp_num_cpus - 1);

        /* Acquire the smp_call_function_data mutex.  */
        if (pointer_lock(&smp_call_function_data, &data, retry))
                return -EBUSY;

        /* Send a message to all other CPUs.  */
        send_ipi_message(to_whom, IPI_CALL_FUNC);

        /* Wait for a minimal response.  */
        timeout = jiffies + HZ;
        while (atomic_read (&data.unstarted_count) > 0
               && time_before (jiffies, timeout))
                barrier();

        /* We either got one or timed out -- clear the lock.  */
        mb();
        smp_call_function_data = 0;
        if (atomic_read (&data.unstarted_count) > 0)
                return -ETIMEDOUT;

        /* Wait for a complete response, if needed.  */
        if (wait) {
                while (atomic_read (&data.unfinished_count) > 0)
                        barrier();
        }

        return 0;
}

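/*
 * Usage sketch (illustrative only -- the two helpers below are hypothetical
 * and not part of this file): callers hand smp_call_function a fast,
 * non-blocking handler, exactly as the TLB and icache flush routines
 * further down do.
 */
#if 0
static atomic_t example_acks = ATOMIC_INIT(0);

static void example_ipi_handler(void *info)
{
        /* Runs on every other CPU in interrupt context; must not block.  */
        atomic_inc((atomic_t *) info);
}

static void example_broadcast(void)
{
        /* retry=1: spin for the data mutex; wait=1: return only after all
           other CPUs have finished running the handler.  */
        if (smp_call_function(example_ipi_handler, &example_acks, 1, 1))
                printk(KERN_CRIT "example_broadcast: timed out\n");
}
#endif
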
static void
ipi_imb(void *ignored)
{
        imb();
}

void
smp_imb(void)
{
        /* Must wait for other processors to flush their icache before
           continuing.  */
        if (smp_call_function(ipi_imb, NULL, 1, 1))
                printk(KERN_CRIT "smp_imb: timed out\n");

        imb();
}

static void
ipi_flush_tlb_all(void *ignored)
{
        tbia();
}

void
flush_tlb_all(void)
{
        /* Although we don't have any data to pass, we do want to
           synchronize with the other processors.  */
        if (smp_call_function(ipi_flush_tlb_all, NULL, 1, 1)) {
                printk(KERN_CRIT "flush_tlb_all: timed out\n");
        }

        tbia();
}

static void
ipi_flush_tlb_mm(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm)
                flush_tlb_current(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->active_mm) {
                flush_tlb_current(mm);
                if (atomic_read(&mm->mm_users) <= 1)
                        return;
        } else
                flush_tlb_other(mm);

        if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
                printk(KERN_CRIT "flush_tlb_mm: timed out\n");
        }
}

struct flush_tlb_page_struct {
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
        struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
        if (data->mm == current->active_mm)
                flush_tlb_current_page(data->mm, data->vma, data->addr);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct flush_tlb_page_struct data;
        struct mm_struct *mm = vma->vm_mm;

        if (mm == current->active_mm) {
                flush_tlb_current_page(mm, vma, addr);
                if (atomic_read(&mm->mm_users) <= 1)
                        return;
        } else
                flush_tlb_other(mm);

        data.vma = vma;
        data.mm = mm;
        data.addr = addr;

        if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
                printk(KERN_CRIT "flush_tlb_page: timed out\n");
        }
}

void
flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
        /* On the Alpha we always flush the whole user tlb.  */
        flush_tlb_mm(mm);
}

static void
ipi_flush_icache_page(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm)
                __load_new_mm_context(mm);
}

void
flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        struct mm_struct *mm = vma->vm_mm;

        if ((vma->vm_flags & VM_EXEC) == 0)
                return;

        if (mm == current->active_mm) {
                __load_new_mm_context(mm);
                if (atomic_read(&mm->mm_users) <= 1)
                        return;
        } else
                flush_tlb_other(mm);

        if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
                printk(KERN_CRIT "flush_icache_page: timed out\n");
        }
}

int
smp_info(char *buffer)
{
        return sprintf(buffer,
                       "cpus active\t\t: %d\n"
                       "cpu active mask\t\t: %016lx\n",
                       smp_num_cpus, cpu_present_mask);
}

#if DEBUG_SPINLOCK
void
spin_unlock(spinlock_t * lock)
{
        mb();
        lock->lock = 0;

        lock->on_cpu = -1;
        lock->previous = NULL;
        lock->task = NULL;
        lock->base_file = "none";
        lock->line_no = 0;
}

void
debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
{
        long tmp;
        long stuck;
        void *inline_pc = __builtin_return_address(0);
        unsigned long started = jiffies;
        int printed = 0;
        int cpu = smp_processor_id();

        stuck = 1L << 28;
try_again:

        /* Use sub-sections to put the actual loop at the end
           of this object file's text section so as to perfect
           branch prediction.  */
        __asm__ __volatile__(
        "1:     ldl_l   %0,%1\n"
        "       subq    %2,1,%2\n"
        "       blbs    %0,2f\n"
        "       or      %0,1,%0\n"
        "       stl_c   %0,%1\n"
        "       beq     %0,3f\n"
        "4:     mb\n"
        ".subsection 2\n"
        "2:     ldl     %0,%1\n"
        "       subq    %2,1,%2\n"
        "3:     blt     %2,4b\n"
        "       blbs    %0,2b\n"
        "       br      1b\n"
        ".previous"
        : "=r" (tmp), "=m" (__dummy_lock(lock)), "=r" (stuck)
        : "1" (__dummy_lock(lock)), "2" (stuck));

        if (stuck < 0) {
                printk(KERN_WARNING
                       "%s:%d spinlock stuck in %s at %p(%d)"
                       " owner %s at %p(%d) %s:%d\n",
                       base_file, line_no,
                       current->comm, inline_pc, cpu,
                       lock->task->comm, lock->previous,
                       lock->on_cpu, lock->base_file, lock->line_no);
                stuck = 1L << 36;
                printed = 1;
                goto try_again;
        }

        /* Exiting.  Got the lock.  */
        lock->on_cpu = cpu;
        lock->previous = inline_pc;
        lock->task = current;
        lock->base_file = base_file;
        lock->line_no = line_no;

        if (printed) {
                printk(KERN_WARNING
                       "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
                       base_file, line_no, current->comm, inline_pc,
                       cpu, jiffies - started);
        }
}

int
debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
{
        int ret;
        if ((ret = !test_and_set_bit(0, lock))) {
                lock->on_cpu = smp_processor_id();
                lock->previous = __builtin_return_address(0);
                lock->task = current;
        } else {
                lock->base_file = base_file;
                lock->line_no = line_no;
        }
        return ret;
}
#endif /* DEBUG_SPINLOCK */

#if DEBUG_RWLOCK
void write_lock(rwlock_t * lock)
{
        long regx, regy;
        int stuck_lock, stuck_reader;
        void *inline_pc = __builtin_return_address(0);

try_again:

        stuck_lock = 1<<26;
        stuck_reader = 1<<26;

        __asm__ __volatile__(
        "1:     ldl_l   %1,%0\n"
        "       blbs    %1,6f\n"
        "       blt     %1,8f\n"
        "       mov     1,%2\n"
        "       stl_c   %2,%0\n"
        "       beq     %2,2f\n"
        "4:     mb\n"
        ".subsection 2\n"
        "2:     ldl     %1,%0\n"
        "       blbs    %1,6f\n"
        "       blt     %1,8f\n"
        "       br      1b\n"
        "6:     blt     %3,4b   # debug\n"
        "       subl    %3,1,%3 # debug\n"
        "       ldl     %1,%0\n"
        "       blbs    %1,6b\n"
        "       br      1b\n"
        "8:     blt     %4,4b   # debug\n"
        "       subl    %4,1,%4 # debug\n"
        "       ldl     %1,%0\n"
        "       blt     %1,8b\n"
        "       br      1b\n"
        ".previous"
        : "=m" (__dummy_lock(lock)), "=&r" (regx), "=&r" (regy),
          "=&r" (stuck_lock), "=&r" (stuck_reader)
        : "0" (__dummy_lock(lock)), "3" (stuck_lock), "4" (stuck_reader));

        if (stuck_lock < 0) {
                printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
                goto try_again;
        }
        if (stuck_reader < 0) {
                printk(KERN_WARNING "write_lock stuck on readers at %p\n",
                       inline_pc);
                goto try_again;
        }
}

void read_lock(rwlock_t * lock)
{
        long regx;
        int stuck_lock;
        void *inline_pc = __builtin_return_address(0);

try_again:

        stuck_lock = 1<<26;

        __asm__ __volatile__(
        "1:     ldl_l   %1,%0\n"
        "       blbs    %1,6f\n"
        "       subl    %1,2,%1\n"
        "       stl_c   %1,%0\n"
        "       beq     %1,2f\n"
        "4:     mb\n"
        ".subsection 2\n"
        "2:     ldl     %1,%0\n"
        "       blt     %2,4b   # debug\n"
        "       subl    %2,1,%2 # debug\n"
        "6:     blbs    %1,2b\n"
        "       br      1b\n"
        ".previous"
        : "=m" (__dummy_lock(lock)), "=&r" (regx), "=&r" (stuck_lock)
        : "0" (__dummy_lock(lock)), "2" (stuck_lock));

        if (stuck_lock < 0) {
                printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
                goto try_again;
        }
}
#endif /* DEBUG_RWLOCK */