/*
 *	linux/arch/alpha/kernel/smp.c
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>

#define __KERNEL_SYSCALLS__
#include <asm/unistd.h>

#define DBGS(args)	printk args
/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits __cacheline_aligned;
} ipi_data[NR_CPUS];

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;
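/* Each CPU owns one cache-line-aligned word of IPI message bits.  A
   sender sets the bit for the desired ipi_message_type with set_bit()
   and then interrupts the target; the target atomically swaps its word
   to zero in handle_ipi() and services every bit it found set.  */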
/* Set to a secondary's cpuid when it comes online.  */
static unsigned long smp_secondary_alive;

unsigned long cpu_present_mask;	/* Which cpus ids came online.  */

static int max_cpus = -1;	/* Command-line limitation.  */
int smp_boot_cpuid;		/* Which processor we booted from.  */
int smp_num_probed;		/* Internal processor count.  */
int smp_num_cpus = 1;		/* Number that came online.  */
int smp_threads_ready;		/* True once the per process idle is forked.  */
cycles_t cacheflush_time;

int cpu_number_map[NR_CPUS];
int __cpu_logical_map[NR_CPUS];

extern void calibrate_delay(void);
extern asmlinkage void entInt(void);
static int __init
nosmp(char *str)
{
	max_cpus = 0;
	return 1;
}

__setup("nosmp", nosmp);

static int __init
maxcpus(char *str)
{
	get_option(&str, &max_cpus);
	return 1;
}

__setup("maxcpus", maxcpus);
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_sec = loops_per_sec;
	cpu_data[cpuid].last_asn
	  = (cpuid << WIDTH_HARDWARE_ASN) + ASN_FIRST_VERSION;
	cpu_data[cpuid].irq_count = 0;
	cpu_data[cpuid].bh_count = 0;
}
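/* Seeding last_asn with (cpuid << WIDTH_HARDWARE_ASN) gives each
   processor its own range of ASN generation numbers, so a context
   allocated on one CPU is never mistaken for current on another.  */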
/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}
/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	DBGS(("CALLIN %d state 0x%lx\n", cpuid, current->state));

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Setup the scheduler for this processor.  */
	init_idle();

	/* ??? This should be in init_idle.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Get our local ticker going.  */
	smp_setup_percpu_timer(cpuid);

	/* Must have completely accurate bogos.  */
	__sti();
	calibrate_delay();
	smp_store_cpu_info(cpuid);

	/* Allow master to continue.  */
	wmb();
	smp_secondary_alive = cpuid;

	/* Wait for the go code.  */
	while (!smp_threads_ready)
		barrier();

	DBGS(("smp_callin: commencing CPU %d current %p\n",
	      cpuid, current));

	/* Do nothing.  */
	cpu_idle();
}
/*
 * Rough estimation for SMP scheduling, this is the number of cycles it
 * takes for a fully memory-limited process to flush the SMP-local cache.
 *
 * We are not told how much cache there is, so we have to guess.
 */
static void __init
smp_tune_scheduling (void)
{
	struct percpu_struct *cpu;
	unsigned long on_chip_cache;	/* kB */
	unsigned long freq;		/* Hz */

	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
	switch (cpu->type)
	{
	case EV45_CPU:
		on_chip_cache = 16 + 16;
		break;

	case EV5_CPU:
	case EV56_CPU:
		on_chip_cache = 8 + 8 + 96;
		break;

	case PCA56_CPU:
		on_chip_cache = 16 + 8;
		break;

	case EV6_CPU:
		on_chip_cache = 64 + 64;
		break;

	default:
		on_chip_cache = 8 + 8;
		break;
	}

	freq = hwrpb->cycle_freq ? : est_cycle_freq;

	/* Magic estimation stolen from x86 port.  */
	cacheflush_time = freq / 1024 * on_chip_cache / 5000;
}
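/* Worked example (illustrative figures, not read from any HWRPB): an
   EV56 at 500 MHz has 8 + 8 + 96 = 112 kB of on-chip cache, giving
   cacheflush_time = 500000000 / 1024 * 112 / 5000 ~= 10937 cycles.  */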
/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;
	long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1L << cpuid);
	if (hwrpb->txrdy & cpumask)
		goto delay1;
 ready1:

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (hwrpb->txrdy & cpumask)
		goto delay2;
 ready2:
	return;

 delay1:
	/* Wait one second.  Note that jiffies aren't ticking yet.  */
	for (timeout = 100000; timeout > 0; --timeout) {
		if (!(hwrpb->txrdy & cpumask))
			goto ready1;
		udelay(10);
	}
	goto timeout;

 delay2:
	/* Wait one second.  */
	for (timeout = 100000; timeout > 0; --timeout) {
		if (!(hwrpb->txrdy & cpumask))
			goto ready2;
		udelay(10);
	}
	goto timeout;

 timeout:
	printk("Processor %x not ready\n", cpuid);
	return;
}
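/* The console IPC buffer layout assumed above: the low 32 bits of
   quadword 0 hold the message length, and the message text itself
   begins at quadword 1.  */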
/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1L << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		printk(KERN_INFO "recv_secondary_console_msg: on %d "
		       "message is '%s'\n", mycpu, buf);
	}

	hwrpb->txrdy = 0;
}
/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb;
	long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's ptb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long) idle + sizeof(union task_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = idle->thread.ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(&idle->thread);
	hwpcb->flags = idle->thread.pal_flags;
	hwpcb->res1 = hwpcb->res2 = 0;

	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, idle->thread.pal_flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 1 second for an ACK from the console.  Note that jiffies
	   aren't ticking yet.  */
	for (timeout = 100000; timeout > 0; timeout--) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}
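/* To recap the SRM start protocol used above: build a minimal HWPCB
   for the target, point HWRPB.CPU_restart at our entry code, fix up
   the HWRPB checksum, mark the CPU's slot Context Valid and Restart
   Capable, then write "START" into its console mailbox.  The console
   acknowledges by raising Bootstrap In Progress in the slot flags.  */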
/*
 * Bring one cpu online.
 */
static int __init
smp_boot_one_cpu(int cpuid, int cpunum)
{
	struct task_struct *idle;
	long timeout;

	/* Cook up an idler for this guy.  Note that the address we give
	   to kernel_thread is irrelevant -- it's going to start where
	   HWRPB.CPU_restart says to start.  But this gets all the other
	   task-y sort of data structures set up like we wish.  */
	kernel_thread((void *)__smp_callin, NULL, CLONE_PID|CLONE_VM);

	idle = init_task.prev_task;
	if (!idle)
		panic("No idle process for CPU %d", cpunum);
	del_from_runqueue(idle);
	init_tasks[cpunum] = idle;
	idle->processor = cpuid;

	/* Schedule the first task manually.  */
	/* ??? Ingo, what is this?  */
	idle->has_cpu = 1;

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* The secondary will change this once it is happy.  Note that
	   secondary_cpu_start contains the necessary memory barrier.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* We've been acked by the console; wait one second for the task
	   to start up for real.  Note that jiffies aren't ticking yet.  */
	for (timeout = 0; timeout < 100000; timeout++) {
		if (smp_secondary_alive != -1)
			goto alive;
		udelay(10);
	}

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	cpu_number_map[cpuid] = cpunum;
	__cpu_logical_map[cpunum] = cpuid;
	return 0;
}
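/* Example of the two maps: booting off hard cpuid 2 with one secondary
   at hard cpuid 0 yields cpu_number_map[2] = 0, cpu_number_map[0] = 1,
   __cpu_logical_map[0] = 2 and __cpu_logical_map[1] = 0 -- logical
   numbers are dense, hard ids are whatever the HWRPB says.  */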
/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	int i;

	smp_boot_cpuid = hard_smp_processor_id();
	if (smp_boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       smp_boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++ ) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				/* Assume here that "whami" == index */
				cpu_present_mask |= (1L << i);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
		cpu_present_mask = (1L << smp_boot_cpuid);
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, cpu_present_mask);
}
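/* The 0x1cc test above requires several per-CPU HWRPB status bits at
   once -- by the SRM numbering these appear to be Processor Available,
   Processor Present, Operator Halted, Context Valid and PALcode Valid,
   i.e. a CPU that exists and is halted waiting for a START.  */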
/*
 * Called by smp_init to bring all the secondaries online and hold them.
 */
void __init
smp_boot_cpus(void)
{
	int cpu_count, i;
	unsigned long bogosum;

	/* Take care of some initial bookkeeping.  */
	memset(cpu_number_map, -1, sizeof(cpu_number_map));
	memset(__cpu_logical_map, -1, sizeof(__cpu_logical_map));
	memset(ipi_data, 0, sizeof(ipi_data));

	cpu_number_map[smp_boot_cpuid] = 0;
	__cpu_logical_map[0] = smp_boot_cpuid;
	current->processor = smp_boot_cpuid;

	smp_store_cpu_info(smp_boot_cpuid);
	smp_tune_scheduling();
	smp_setup_percpu_timer(smp_boot_cpuid);

	init_idle();

	/* ??? This should be in init_idle.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	cpu_count = 1;
	for (i = 0; i < NR_CPUS; i++) {
		if (i == smp_boot_cpuid)
			continue;

		if (((cpu_present_mask >> i) & 1) == 0)
			continue;

		if (smp_boot_one_cpu(i, cpu_count))
			continue;

		cpu_count++;
	}

	if (cpu_count == 1) {
		printk(KERN_ERR "SMP: Only one lonely processor alive.\n");
		return;
	}

	bogosum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_present_mask & (1L << i))
			bogosum += cpu_data[i].loops_per_sec;
	}
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       cpu_count, (bogosum + 2500) / 500000,
	       ((bogosum + 2500) / 5000) % 100);

	smp_num_cpus = cpu_count;
}
/*
 * Called by smp_init to release the blocking online cpus once they
 * are all started.
 */
void __init
smp_commence(void)
{
	/* smp_init sets smp_threads_ready -- that's enough.  */
	mb();
}

/*
 * Only broken Intel needs this, thus it should not even be
 * referenced globally.
 */
void __init
initialize_secondary(void)
{
}
extern void update_one_process(struct task_struct *p, unsigned long ticks,
			       unsigned long user, unsigned long system,
			       int cpu);

void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	/* Record kernel PC.  */
	if (!user)
		alpha_do_profile(regs->pc);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter(cpu, TIMER_IRQ);

		update_one_process(current, 1, user, !user, cpu);

		if (current->pid) {
			if (--current->counter <= 0) {
				current->counter = 0;
				current->need_resched = 1;
			}

			if (user) {
				if (current->priority < DEF_PRIORITY) {
					kstat.per_cpu_nice[cpu]++;
				} else {
					kstat.per_cpu_user[cpu]++;
				}
			} else {
				kstat.per_cpu_system[cpu]++;
			}
		}

		data->prof_counter = data->prof_multiplier;
		irq_exit(cpu, TIMER_IRQ);
	}
}
int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
static void
send_ipi_message(unsigned long to_whom, enum ipi_message_type operation)
{
	long i, j;

	/* Reduce the number of memory barriers by doing two loops,
	   one to set the bits, one to invoke the interrupts.  */

	mb();	/* Order out-of-band data and bit setting. */

	for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
		if (to_whom & j)
			set_bit(operation, &ipi_data[i].bits);
	}

	mb();	/* Order bit setting and interrupt. */

	for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
		if (to_whom & j)
			wripir(i);
	}
}
/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially locked.  If retry, spin until free.  */

static inline int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "m"(*(void **)lock), "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}
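/* In other words, pointer_lock is a compare-and-swap against NULL
   built from ldq_l/stq_c: load-locked the pointer, give up if it was
   already non-zero, otherwise store-conditional our data and restart
   on any intervening write.  Failure with !retry returns -EBUSY; with
   retry we spin until the pointer clears and try the swap again.  */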
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

	DBGS(("handle_ipi: on CPU %d ops 0x%x PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ops & -ops;
			ops &= ~which;
			which = ffz(~which);

			if (which == IPI_RESCHEDULE) {
				/* Reschedule callback.  Everything to be done
				   is done by the interrupt return path.  */
			}
			else if (which == IPI_CALL_FUNC) {
				struct smp_call_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				data = smp_call_function_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				/* Notify the sending CPU that the data has been
				   received, and execution is about to begin.  */
				mb();
				atomic_dec (&data->unstarted_count);

				/* At this point the structure may be gone unless
				   wait is true.  */
				(*func)(info);

				/* Notify the sending CPU that the task is done.  */
				mb();
				if (wait) atomic_dec (&data->unfinished_count);
			}
			else if (which == IPI_CPU_STOP) {
				halt();
			}
			else {
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
			}
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}
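/* The bit extraction above works like this: which = ops & -ops
   isolates the lowest set bit, ops &= ~which clears it, and
   ffz(~which) turns the single-bit mask back into a bit index.
   E.g. for ops = 0x5 the loop services bit 0 (IPI_RESCHEDULE)
   and then bit 2 (IPI_CPU_STOP).  */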
void
smp_send_reschedule(int cpu)
{
#if DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(1L << cpu, IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	unsigned long to_whom = cpu_present_mask ^ (1L << smp_processor_id());
#if DEBUG_IPI_MSG
	if (hard_smp_processor_id() != smp_boot_cpuid)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}
/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 */
int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	unsigned long to_whom = cpu_present_mask ^ (1L << smp_processor_id());
	struct smp_call_struct data;
	long timeout;

	data.func = func;
	data.info = info;
	data.wait = wait;
	atomic_set(&data.unstarted_count, smp_num_cpus - 1);
	atomic_set(&data.unfinished_count, smp_num_cpus - 1);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to all other CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* We either got one or timed out -- clear the lock.  */
	mb();
	smp_call_function_data = 0;
	if (atomic_read (&data.unstarted_count) > 0)
		return -ETIMEDOUT;

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}
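/* The two counters form the handshake that makes the stack-allocated
   smp_call_struct workable: unstarted_count tells us every target has
   picked up func/info (so the global pointer can be cleared), and
   unfinished_count, checked only when wait is set, tells us the calls
   themselves have completed.  */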
static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (smp_call_function(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
	tbia();
}

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm)
		flush_tlb_current(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1)
			return;
	} else
		flush_tlb_other(mm);

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}
}

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	if (data->mm == current->active_mm)
		flush_tlb_current_page(data->mm, data->vma, data->addr);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1)
			return;
	} else
		flush_tlb_other(mm);

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}
}

void
flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(mm);
}
int
smp_info(char *buffer)
{
	int i;
	unsigned long sum = 0;

	for (i = 0; i < NR_CPUS; i++)
		sum += cpu_data[i].ipi_count;

	return sprintf(buffer, "CPUs probed %d active %d map 0x%lx IPIs %ld\n",
		       smp_num_probed, smp_num_cpus, cpu_present_mask, sum);
}
#if DEBUG_SPINLOCK
void
spin_unlock(spinlock_t * lock)
{
	mb();
	lock->lock = 0;

	lock->on_cpu = -1;
	lock->previous = NULL;
	lock->task = NULL;
	lock->base_file = "none";
	lock->line_no = 0;
}

void
debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
{
	long tmp;
	long stuck;
	void *inline_pc = __builtin_return_address(0);
	unsigned long started = jiffies;
	int printed = 0;
	int cpu = smp_processor_id();

	stuck = 1L << 28;
 try_again:

	/* Use sub-sections to put the actual loop at the end
	   of this object file's text section so as to perfect
	   branch prediction.  */
	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	subq	%2,1,%2\n"
	"	blbs	%0,2f\n"
	"	or	%0,1,%0\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,3f\n"
	"4:	mb\n"
	".section .text2,\"ax\"\n"
	"2:	ldl	%0,%1\n"
	"	subq	%2,1,%2\n"
	"3:	blt	%2,4b\n"
	"	blbs	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=r" (tmp), "=m" (__dummy_lock(lock)), "=r" (stuck)
	: "1" (__dummy_lock(lock)), "2" (stuck));

	if (stuck < 0) {
		printk(KERN_WARNING
		       "%s:%d spinlock stuck in %s at %p(%d)"
		       " owner %s at %p(%d) %s:%d\n",
		       base_file, line_no,
		       current->comm, inline_pc, cpu,
		       lock->task->comm, lock->previous,
		       lock->on_cpu, lock->base_file, lock->line_no);
		stuck = 1L << 36;
		printed = 1;
		goto try_again;
	}

	/* Exiting.  Got the lock.  */
	lock->on_cpu = cpu;
	lock->previous = inline_pc;
	lock->task = current;
	lock->base_file = base_file;
	lock->line_no = line_no;

	if (printed) {
		printk(KERN_WARNING
		       "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
		       base_file, line_no, current->comm, inline_pc,
		       cpu, jiffies - started);
	}
}

int
debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
{
	int ret;
	if ((ret = !test_and_set_bit(0, lock))) {
		lock->on_cpu = smp_processor_id();
		lock->previous = __builtin_return_address(0);
		lock->task = current;
	} else {
		lock->base_file = base_file;
		lock->line_no = line_no;
	}
	return ret;
}
#endif /* DEBUG_SPINLOCK */
#if DEBUG_RWLOCK
void write_lock(rwlock_t * lock)
{
	long regx, regy;
	int stuck_lock, stuck_reader;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<26;
	stuck_reader = 1<<26;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	blt	%1,8f\n"
	"	mov	1,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"4:	mb\n"
	".section .text2,\"ax\"\n"
	"6:	blt	%3,4b	# debug\n"
	"	subl	%3,1,%3	# debug\n"
	"	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"8:	blt	%4,4b	# debug\n"
	"	subl	%4,1,%4	# debug\n"
	"	ldl	%1,%0\n"
	"	blt	%1,8b\n"
	"	br	1b\n"
	".previous"
	: "=m" (__dummy_lock(lock)), "=&r" (regx), "=&r" (regy),
	  "=&r" (stuck_lock), "=&r" (stuck_reader)
	: "0" (__dummy_lock(lock)), "3" (stuck_lock), "4" (stuck_reader));

	if (stuck_lock < 0) {
		printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
	if (stuck_reader < 0) {
		printk(KERN_WARNING "write_lock stuck on readers at %p\n",
		       inline_pc);
		goto try_again;
	}
}

void read_lock(rwlock_t * lock)
{
	long regx;
	int stuck_lock;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<26;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	subl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"4:	mb\n"
	".section .text2,\"ax\"\n"
	"6:	ldl	%1,%0\n"
	"	blt	%2,4b	# debug\n"
	"	subl	%2,1,%2	# debug\n"
	"	blbs	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (__dummy_lock(lock)), "=&r" (regx), "=&r" (stuck_lock)
	: "0" (__dummy_lock(lock)), "2" (stuck_lock));

	if (stuck_lock < 0) {
		printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
}
#endif /* DEBUG_RWLOCK */