2 * linux/arch/alpha/kernel/smp.c
5 #include <linux/errno.h>
6 #include <linux/kernel.h>
7 #include <linux/kernel_stat.h>
8 #include <linux/sched.h>
10 #include <linux/tasks.h>
11 #include <linux/smp.h>
12 #include <linux/smp_lock.h>
13 #include <linux/interrupt.h>
14 #include <linux/init.h>
15 #include <linux/delay.h>
17 #include <asm/hwrpb.h>
18 #include <asm/ptrace.h>
19 #include <asm/atomic.h>
23 #include <asm/bitops.h>
24 #include <asm/pgtable.h>
25 #include <asm/spinlock.h>
26 #include <asm/hardirq.h>
27 #include <asm/softirq.h>
29 #define __KERNEL_SYSCALLS__
30 #include <asm/unistd.h>
/*
 * File-wide SMP state: per-CPU data, IPI mailboxes, the global kernel
 * lock, and bookkeeping for which CPUs were probed and came online.
 * NOTE(review): DBGS is unconditionally enabled here (expands to printk);
 * in other builds it is usually compiled out -- verify intent.
 */
38 #define DBGS(args) printk args
43 /* A collection of per-processor data. */
44 struct cpuinfo_alpha cpu_data
[NR_CPUS
];
46 /* A collection of single bit ipi messages. */
/* NOTE(review): fragment -- the struct/array (ipi_data) wrapping this
   cacheline-aligned bitmask is missing from this excerpt. */
48 unsigned long bits __cacheline_aligned
;
/* NOTE(review): enum body (IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP,
   used later by handle_ipi) is missing from this excerpt. */
51 enum ipi_message_type
{
/* The big kernel lock (see <linux/smp_lock.h>). */
57 spinlock_t kernel_flag __cacheline_aligned
= SPIN_LOCK_UNLOCKED
;
59 /* Set to a secondary's cpuid when it comes online. */
60 static unsigned long smp_secondary_alive
;
62 unsigned long cpu_present_mask
; /* Which cpus ids came online. */
64 static int max_cpus
= -1; /* Command-line limitation. */
65 int smp_boot_cpuid
; /* Which processor we booted from. */
66 int smp_num_probed
; /* Internal processor count */
67 int smp_num_cpus
= 1; /* Number that came online. */
68 int smp_threads_ready
; /* True once the per process idle is forked. */
69 cycles_t cacheflush_time
;
/* Translation tables: physical cpuid <-> dense logical cpu number. */
71 int cpu_number_map
[NR_CPUS
];
72 int __cpu_logical_map
[NR_CPUS
];
74 extern void calibrate_delay(void);
75 extern asmlinkage
void entInt(void);
/*
 * NOTE(review): fragment -- return type, braces, and the body that
 * consumes ints[] (presumably setting max_cpus) are missing from this
 * excerpt; only the guard condition survives.
 */
79 * Process bootcommand SMP options, like "nosmp" and "maxcpus=".
82 smp_setup(char *str
, int *ints
)
84 if (ints
&& ints
[0] > 0)
91 * Called by both boot and secondaries to move global data into
92 * per-processor storage.
94 static inline void __init
95 smp_store_cpu_info(int cpuid
)
97 cpu_data
[cpuid
].loops_per_sec
= loops_per_sec
;
98 cpu_data
[cpuid
].last_asn
99 = (cpuid
<< WIDTH_HARDWARE_ASN
) + ASN_FIRST_VERSION
;
100 cpu_data
[cpuid
].irq_count
= 0;
101 cpu_data
[cpuid
].bh_count
= 0;
105 * Ideally sets up per-cpu profiling hooks. Doesn't do much now...
107 static inline void __init
108 smp_setup_percpu_timer(int cpuid
)
110 cpu_data
[cpuid
].prof_counter
= 1;
111 cpu_data
[cpuid
].prof_multiplier
= 1;
/*
 * smp_callin -- where a secondary CPU begins a life of C.
 * NOTE(review): heavy fragment -- the function header and the actual
 * statements for machine-check enable, trap/interrupt vector setup,
 * scheduler setup, and delay calibration are missing from this excerpt;
 * only their banner comments survive.
 */
115 * Where secondaries begin a life of C.
120 int cpuid
= hard_smp_processor_id();
122 DBGS(("CALLIN %d state 0x%lx\n", cpuid
, current
->state
));
124 /* Turn on machine checks. */
127 /* Set trap vectors. */
130 /* Set interrupt vector. */
133 /* Setup the scheduler for this processor. */
136 /* Get our local ticker going. */
137 smp_setup_percpu_timer(cpuid
);
139 /* Must have completely accurate bogos. */
142 smp_store_cpu_info(cpuid
);
144 /* Allow master to continue. */
/* This store is what smp_boot_one_cpu() polls for (it preset -1). */
146 smp_secondary_alive
= cpuid
;
148 /* Wait for the go code. */
149 while (!smp_threads_ready
)
152 DBGS(("smp_callin: commencing CPU %d current %p\n",
/*
 * Estimate cacheflush_time for the scheduler.
 * NOTE(review): fragment -- the switch over the CPU type that selects
 * on_chip_cache lost its case labels, and the declaration of `freq`
 * is missing from this excerpt.
 */
161 * Rough estimation for SMP scheduling, this is the number of cycles it
162 * takes for a fully memory-limited process to flush the SMP-local cache.
164 * We are not told how much cache there is, so we have to guess.
167 smp_tune_scheduling (void)
169 struct percpu_struct
*cpu
;
170 unsigned long on_chip_cache
;
173 cpu
= (struct percpu_struct
*)((char*)hwrpb
+ hwrpb
->processor_offset
);
177 on_chip_cache
= 16 + 16;
182 on_chip_cache
= 8 + 8 + 96;
186 on_chip_cache
= 16 + 8;
190 on_chip_cache
= 64 + 64;
194 on_chip_cache
= 8 + 8;
/* Fall back to the estimated frequency when the HWRPB field is zero. */
198 freq
= hwrpb
->cycle_freq
? : est_cycle_freq
;
200 /* Magic estimation stolen from x86 port. */
201 cacheflush_time
= freq
/ 1024 * on_chip_cache
/ 5000;
/*
 * Copy a length-prefixed message into the target CPU's per-CPU
 * ipc_buffer, set its bit in hwrpb->rxrdy, and busy-wait for the
 * console to drop the matching txrdy bit.
 * NOTE(review): fragment -- declarations of len/timeout, the base
 * hwrpb cast, and the intervening waits/prints are missing from
 * this excerpt.
 */
205 * Send a message to a secondary's console. "START" is one such
206 * interesting message. ;-)
209 send_secondary_console_msg(char *str
, int cpuid
)
211 struct percpu_struct
*cpu
;
212 register char *cp1
, *cp2
;
213 unsigned long cpumask
;
217 cpu
= (struct percpu_struct
*)
219 + hwrpb
->processor_offset
220 + cpuid
* hwrpb
->processor_size
);
222 cpumask
= (1L << cpuid
);
223 if (hwrpb
->txrdy
& cpumask
)
/* Word 0 of the ipc buffer carries the message length. */
229 *(unsigned int *)&cpu
->ipc_buffer
[0] = len
;
230 cp1
= (char *) &cpu
->ipc_buffer
[1];
231 memcpy(cp1
, cp2
, len
);
233 /* atomic test and set */
235 set_bit(cpuid
, &hwrpb
->rxrdy
);
237 if (hwrpb
->txrdy
& cpumask
)
243 /* Wait one second. Note that jiffies aren't ticking yet. */
244 for (timeout
= 100000; timeout
> 0; --timeout
) {
245 if (!(hwrpb
->txrdy
& cpumask
))
253 /* Wait one second. */
254 for (timeout
= 100000; timeout
> 0; --timeout
) {
255 if (!(hwrpb
->txrdy
& cpumask
))
263 printk("Processor %x not ready\n", cpuid
);
/*
 * Scan hwrpb->txrdy for CPUs with a pending console message, pull the
 * length (high half of ipc_buffer[0]) and text out of each sender's
 * ipc_buffer, and log it.
 * NOTE(review): fragment -- declarations of i/mycpu/cnt, the `continue`,
 * the good-message copy path, and the CR/LF rewriting loop body are
 * missing from this excerpt.
 */
268 * A secondary console wants to send a message. Receive it.
271 recv_secondary_console_msg(void)
274 unsigned long txrdy
= hwrpb
->txrdy
;
275 char *cp1
, *cp2
, buf
[80];
276 struct percpu_struct
*cpu
;
278 DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy
));
280 mycpu
= hard_smp_processor_id();
282 for (i
= 0; i
< NR_CPUS
; i
++) {
283 if (!(txrdy
& (1L << i
)))
286 DBGS(("recv_secondary_console_msg: "
287 "TXRDY contains CPU %d.\n", i
));
289 cpu
= (struct percpu_struct
*)
291 + hwrpb
->processor_offset
292 + i
* hwrpb
->processor_size
);
294 DBGS(("recv_secondary_console_msg: on %d from %d"
295 " HALT_REASON 0x%lx FLAGS 0x%lx\n",
296 mycpu
, i
, cpu
->halt_reason
, cpu
->flags
));
/* Message length lives in the high 32 bits of word 0; reject
   anything that would not fit buf[80]. */
298 cnt
= cpu
->ipc_buffer
[0] >> 32;
299 if (cnt
<= 0 || cnt
>= 80)
300 strcpy(buf
, "<<< BOGUS MSG >>>");
302 cp1
= (char *) &cpu
->ipc_buffer
[11];
306 while ((cp2
= strchr(cp2
, '\r')) != 0) {
313 printk(KERN_INFO
"recv_secondary_console_msg: on %d "
314 "message is '%s'\n", mycpu
, buf
);
/*
 * Prime the target CPU's HWPCB and the HWRPB restart vector, then ask
 * the SRM console (via "START") to set the secondary running.
 * FIX(review): the first DBGS referenced undeclared `hwcpb`; the local
 * is `hwpcb` (declared at orig line 327).  With DBGS expanding to
 * printk this was a hard compile error.
 * NOTE(review): fragment -- the base hwrpb cast, the vptb/asn setup,
 * the wmb()s, and the ACK-wait loop body are missing from this excerpt.
 */
321 * Convince the console to have a secondary cpu begin execution.
324 secondary_cpu_start(int cpuid
, struct task_struct
*idle
)
326 struct percpu_struct
*cpu
;
327 struct pcb_struct
*hwpcb
;
330 cpu
= (struct percpu_struct
*)
332 + hwrpb
->processor_offset
333 + cpuid
* hwrpb
->processor_size
);
334 hwpcb
= (struct pcb_struct
*) cpu
->hwpcb
;
336 /* Initialize the CPU's HWPCB to something just good enough for
337 us to get started. Immediately after starting, we'll swpctx
338 to the target idle task's ptb. Reuse the stack in the mean
339 time. Precalculate the target PCBB. */
340 hwpcb
->ksp
= (unsigned long) idle
+ sizeof(union task_union
) - 16;
342 hwpcb
->ptbr
= idle
->thread
.ptbr
;
345 hwpcb
->unique
= virt_to_phys(&idle
->thread
);
346 hwpcb
->flags
= idle
->thread
.pal_flags
;
347 hwpcb
->res1
= hwpcb
->res2
= 0;
349 DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
350 hwpcb
->ksp
, hwpcb
->ptbr
, hwrpb
->vptb
, hwpcb
->unique
));
351 DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
352 cpuid
, idle
->state
, idle
->thread
.pal_flags
));
354 /* Setup HWRPB fields that SRM uses to activate secondary CPU */
355 hwrpb
->CPU_restart
= __smp_callin
;
356 hwrpb
->CPU_restart_data
= (unsigned long) __smp_callin
;
358 /* Recalculate and update the HWRPB checksum */
359 hwrpb_update_checksum(hwrpb
);
362 * Send a "start" command to the specified processor.
365 /* SRM III 3.4.1.3 */
366 cpu
->flags
|= 0x22; /* turn on Context Valid and Restart Capable */
367 cpu
->flags
&= ~1; /* turn off Bootstrap In Progress */
370 send_secondary_console_msg("START\r\n", cpuid
);
372 /* Wait 1 second for an ACK from the console. Note that jiffies
373 aren't ticking yet. */
374 for (timeout
= 100000; timeout
> 0; timeout
--) {
380 printk(KERN_ERR
"SMP: Processor %d failed to start.\n", cpuid
);
384 DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid
));
/*
 * Fork an idle task for the secondary, hand it to secondary_cpu_start,
 * and poll smp_secondary_alive until the secondary reports in, then
 * record the cpuid <-> logical-number mapping.
 * NOTE(review): fragment -- return type, the timeout declaration, the
 * failure `return -1`s, the barrier()/udelay in the wait loop, and the
 * success return are missing from this excerpt.
 */
389 * Bring one cpu online.
392 smp_boot_one_cpu(int cpuid
, int cpunum
)
394 struct task_struct
*idle
;
397 /* Cook up an idler for this guy. Note that the address we give
398 to kernel_thread is irrelevant -- it's going to start where
399 HWRPB.CPU_restart says to start. But this gets all the other
400 task-y sort of data structures set up like we wish. */
401 kernel_thread((void *)__smp_callin
, NULL
, CLONE_PID
|CLONE_VM
);
/* The freshly forked thread is the newest task on the list. */
403 idle
= init_task
.prev_task
;
405 panic("No idle process for CPU %d", cpunum
);
406 del_from_runqueue(idle
);
407 init_tasks
[cpunum
] = idle
;
408 idle
->processor
= cpuid
;
410 /* Schedule the first task manually. */
411 /* ??? Ingo, what is this? */
414 DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
415 cpuid
, idle
->state
, idle
->flags
));
417 /* The secondary will change this once it is happy. Note that
418 secondary_cpu_start contains the necessary memory barrier. */
419 smp_secondary_alive
= -1;
421 /* Whirrr, whirrr, whirrrrrrrrr... */
422 if (secondary_cpu_start(cpuid
, idle
))
425 /* We've been acked by the console; wait one second for the task
426 to start up for real. Note that jiffies aren't ticking yet. */
427 for (timeout
= 0; timeout
< 100000; timeout
++) {
428 if (smp_secondary_alive
!= -1)
434 printk(KERN_ERR
"SMP: Processor %d is stuck.\n", cpuid
);
438 /* Another "Red Snapper". */
439 cpu_number_map
[cpuid
] = cpunum
;
440 __cpu_logical_map
[cpunum
] = cpuid
;
/*
 * Probe the HWRPB for available processors, build cpu_present_mask,
 * and propagate the boot CPU's PAL revision to the secondaries.
 * NOTE(review): fragment -- function header, the `int i` declaration,
 * smp_num_probed accounting, and the single-CPU else-branch framing
 * are missing from this excerpt.
 */
445 * Called from setup_arch. Detect an SMP system and which processors
451 struct percpu_struct
*cpubase
, *cpu
;
454 smp_boot_cpuid
= hard_smp_processor_id();
455 if (smp_boot_cpuid
!= 0) {
456 printk(KERN_WARNING
"SMP: Booting off cpu %d instead of 0?\n",
460 if (hwrpb
->nr_processors
> 1) {
463 DBGS(("setup_smp: nr_processors %ld\n",
464 hwrpb
->nr_processors
));
466 cpubase
= (struct percpu_struct
*)
467 ((char*)hwrpb
+ hwrpb
->processor_offset
);
468 boot_cpu_palrev
= cpubase
->pal_revision
;
470 for (i
= 0; i
< hwrpb
->nr_processors
; i
++ ) {
471 cpu
= (struct percpu_struct
*)
472 ((char *)cpubase
+ i
*hwrpb
->processor_size
);
/* 0x1cc == processor available, present, and enabled. */
473 if ((cpu
->flags
& 0x1cc) == 0x1cc) {
475 /* Assume here that "whami" == index */
476 cpu_present_mask
|= (1L << i
);
477 cpu
->pal_revision
= boot_cpu_palrev
;
480 DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
481 i
, cpu
->flags
, cpu
->type
));
482 DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
483 i
, cpu
->pal_revision
));
487 cpu_present_mask
= (1L << smp_boot_cpuid
);
490 printk(KERN_INFO
"SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
491 smp_num_probed
, cpu_present_mask
);
/*
 * Boot-CPU entry point: initialize the maps, start every other present
 * CPU via smp_boot_one_cpu, and report the total BogoMIPS.
 * NOTE(review): fragment -- function header, `int i`/`cpu_count`
 * declarations, the early returns, and the loop `continue`s are
 * missing from this excerpt.  Also note: no initialization of
 * `bogosum` is visible before the `+=` loop -- verify against the
 * missing lines.
 */
495 * Called by smp_init bring all the secondaries online and hold them.
501 unsigned long bogosum
;
503 /* Take care of some initial bookkeeping. */
504 memset(cpu_number_map
, -1, sizeof(cpu_number_map
));
505 memset(__cpu_logical_map
, -1, sizeof(__cpu_logical_map
));
506 memset(ipi_data
, 0, sizeof(ipi_data
));
508 cpu_number_map
[smp_boot_cpuid
] = 0;
509 __cpu_logical_map
[0] = smp_boot_cpuid
;
510 current
->processor
= smp_boot_cpuid
;
512 smp_store_cpu_info(smp_boot_cpuid
);
513 smp_tune_scheduling();
514 smp_setup_percpu_timer(smp_boot_cpuid
);
518 /* Nothing to do on a UP box, or when told not to. */
519 if (smp_num_probed
== 1 || max_cpus
== 0) {
520 printk(KERN_INFO
"SMP mode deactivated.\n");
524 printk(KERN_INFO
"SMP starting up secondaries.\n");
527 for (i
= 0; i
< NR_CPUS
; i
++) {
528 if (i
== smp_boot_cpuid
)
531 if (((cpu_present_mask
>> i
) & 1) == 0)
534 if (smp_boot_one_cpu(i
, cpu_count
))
540 if (cpu_count
== 1) {
541 printk(KERN_ERR
"SMP: Only one lonely processor alive.\n");
546 for (i
= 0; i
< NR_CPUS
; i
++) {
547 if (cpu_present_mask
& (1L << i
))
548 bogosum
+= cpu_data
[i
].loops_per_sec
;
550 printk(KERN_INFO
"SMP: Total of %d processors activated "
551 "(%lu.%02lu BogoMIPS).\n",
552 cpu_count
, (bogosum
+ 2500) / 500000,
553 ((bogosum
+ 2500) / 5000) % 100);
555 smp_num_cpus
= cpu_count
;
/*
 * NOTE(review): fragments of two trivial functions -- smp_commence()
 * (a no-op; smp_init sets smp_threads_ready) and the empty
 * initialize_secondary() stub kept only for Intel compatibility.
 * Their bodies/braces are missing from this excerpt.
 */
559 * Called by smp_init to release the blocking online cpus once they
565 /* smp_init sets smp_threads_ready -- that's enough. */
570 * Only broken Intel needs this, thus it should not even be
571 * referenced globally.
575 initialize_secondary(void)
580 extern void update_one_process(struct task_struct
*p
, unsigned long ticks
,
581 unsigned long user
, unsigned long system
,
/*
 * Per-CPU timer tick: profile the kernel PC, and every
 * prof_multiplier ticks do full timekeeping/scheduling accounting
 * under irq_enter/irq_exit.
 * NOTE(review): fragment -- return type, braces, and the profiling
 * guard around alpha_do_profile are missing from this excerpt.
 */
585 smp_percpu_timer_interrupt(struct pt_regs
*regs
)
587 int cpu
= smp_processor_id();
588 unsigned long user
= user_mode(regs
);
589 struct cpuinfo_alpha
*data
= &cpu_data
[cpu
];
591 /* Record kernel PC. */
593 alpha_do_profile(regs
->pc
);
595 if (!--data
->prof_counter
) {
596 /* We need to make like a normal interrupt -- otherwise
597 timer interrupts ignore the global interrupt lock,
598 which would be a Bad Thing. */
599 irq_enter(cpu
, TIMER_IRQ
);
601 update_one_process(current
, 1, user
, !user
, cpu
);
603 if (--current
->counter
<= 0) {
604 current
->counter
= 0;
605 current
->need_resched
= 1;
609 if (current
->priority
< DEF_PRIORITY
) {
611 kstat
.per_cpu_nice
[cpu
]++;
614 kstat
.per_cpu_user
[cpu
]++;
618 kstat
.per_cpu_system
[cpu
]++;
622 data
->prof_counter
= data
->prof_multiplier
;
623 irq_exit(cpu
, TIMER_IRQ
);
/* NOTE(review): fragment -- body missing; presumably returns -EINVAL
   (profiling multiplier changes unsupported).  Verify. */
628 setup_profiling_timer(unsigned int multiplier
)
/*
 * Post `operation` into each target CPU's IPI mailbox, then interrupt
 * them; two passes so only two memory barriers are needed.
 * NOTE(review): fragment -- i/j declarations, the to_whom mask test in
 * each loop, and the actual wripir/interrupt call are missing from
 * this excerpt.
 */
635 send_ipi_message(unsigned long to_whom
, enum ipi_message_type operation
)
639 /* Reduce the number of memory barriers by doing two loops,
640 one to set the bits, one to invoke the interrupts. */
642 mb(); /* Order out-of-band data and bit setting. */
644 for (i
= 0, j
= 1; i
< NR_CPUS
; ++i
, j
<<= 1) {
646 set_bit(operation
, &ipi_data
[i
].bits
);
649 mb(); /* Order bit setting and interrupt. */
651 for (i
= 0, j
= 1; i
< NR_CPUS
; ++i
, j
<<= 1) {
657 /* Structure and data for smp_call_function. This is designed to
658 minimize static memory requirements. Plus it looks cleaner. */
/* NOTE(review): fragment -- the info/wait members (orig lines 662-663)
   are missing from this excerpt. */
660 struct smp_call_struct
{
661 void (*func
) (void *info
);
/* Counts decremented by receivers in handle_ipi as they start/finish. */
664 atomic_t unstarted_count
;
665 atomic_t unfinished_count
;
/* Single shared slot; ownership is arbitrated by pointer_lock(). */
668 static struct smp_call_struct
*smp_call_function_data
;
/*
 * NOTE(review): fragment -- the ldq_l/stq_c compare-and-swap asm body,
 * its success test, and the retry/return paths are missing from this
 * excerpt; only the constraint lines and the spin-wait survive.
 */
670 /* Atomicly drop data into a shared pointer. The pointer is free if
671 it is initially locked. If retry, spin until free. */
674 pointer_lock (void *lock
, void *data
, int retry
)
680 /* Compare and swap with zero. */
688 : "=&r"(old
), "=m"(*(void **)lock
), "=&r"(tmp
)
697 while (*(void **)lock
)
/*
 * IPI receive path: atomically swap out this CPU's pending-IPI bits
 * and dispatch each one (reschedule / call-function / stop), then
 * drain any pending secondary-console messages.
 * NOTE(review): fragment -- ops/which declarations, the bit-extraction
 * loop, the func/info/wait loads, the func(info) invocation, and the
 * halt path of IPI_CPU_STOP are missing from this excerpt.
 */
703 handle_ipi(struct pt_regs
*regs
)
705 int this_cpu
= smp_processor_id();
706 unsigned long *pending_ipis
= &ipi_data
[this_cpu
].bits
;
709 DBGS(("handle_ipi: on CPU %d ops 0x%x PC 0x%lx\n",
710 this_cpu
, *pending_ipis
, regs
->pc
));
712 mb(); /* Order interrupt and bit testing. */
713 while ((ops
= xchg(pending_ipis
, 0)) != 0) {
714 mb(); /* Order bit clearing and data access. */
722 if (which
== IPI_RESCHEDULE
) {
723 /* Reschedule callback. Everything to be done
724 is done by the interrupt return path. */
726 else if (which
== IPI_CALL_FUNC
) {
727 struct smp_call_struct
*data
;
728 void (*func
)(void *info
);
732 data
= smp_call_function_data
;
737 /* Notify the sending CPU that the data has been
738 received, and execution is about to begin. */
740 atomic_dec (&data
->unstarted_count
);
742 /* At this point the structure may be gone unless
746 /* Notify the sending CPU that the task is done. */
748 if (wait
) atomic_dec (&data
->unfinished_count
);
750 else if (which
== IPI_CPU_STOP
) {
754 printk(KERN_CRIT
"Unknown IPI on CPU %d: %lu\n",
759 mb(); /* Order data access and bit testing. */
762 cpu_data
[this_cpu
].ipi_count
++;
/* The SRM console may also be signalling us via hwrpb->txrdy. */
765 recv_secondary_console_msg();
/*
 * Kick `cpu` with a reschedule IPI.  Warns (in the missing debug
 * branch) if asked to IPI ourselves.
 * NOTE(review): fragment -- return type, braces, and the printk head
 * for the self-IPI warning are missing from this excerpt.
 */
769 smp_send_reschedule(int cpu
)
772 if (cpu
== hard_smp_processor_id())
774 "smp_send_reschedule: Sending IPI to self.\n");
776 send_ipi_message(1L << cpu
, IPI_RESCHEDULE
);
/*
 * smp_send_stop fragment: halt every CPU except the caller.
 * NOTE(review): `boot_cpu_id` is not declared anywhere in this
 * excerpt -- the rest of this file uses `smp_boot_cpuid`; verify.
 */
782 unsigned long to_whom
= cpu_present_mask
^ (1L << smp_processor_id());
784 if (hard_smp_processor_id() != boot_cpu_id
)
785 printk(KERN_WARNING
"smp_send_stop: Not on boot cpu.\n");
787 send_ipi_message(to_whom
, IPI_CPU_STOP
);
/*
 * NOTE(review): fragment -- `timeout` declaration, the data.func/info/
 * wait assignments, the barrier() in the wait loops, and the -ETIMEDOUT
 * return paths are missing from this excerpt.
 */
791 * Run a function on all other CPUs.
792 * <func> The function to run. This must be fast and non-blocking.
793 * <info> An arbitrary pointer to pass to the function.
794 * <retry> If true, keep retrying until ready.
795 * <wait> If true, wait until function has completed on other CPUs.
796 * [RETURNS] 0 on success, else a negative status code.
798 * Does not return until remote CPUs are nearly ready to execute <func>
799 * or are or have executed.
803 smp_call_function (void (*func
) (void *info
), void *info
, int retry
, int wait
)
805 unsigned long to_whom
= cpu_present_mask
^ (1L << smp_processor_id());
806 struct smp_call_struct data
;
812 atomic_set(&data
.unstarted_count
, smp_num_cpus
- 1);
813 atomic_set(&data
.unfinished_count
, smp_num_cpus
- 1);
815 /* Acquire the smp_call_function_data mutex. */
816 if (pointer_lock(&smp_call_function_data
, &data
, retry
))
819 /* Send a message to all other CPUs. */
820 send_ipi_message(to_whom
, IPI_CALL_FUNC
);
822 /* Wait for a minimal response. */
823 timeout
= jiffies
+ HZ
;
824 while (atomic_read (&data
.unstarted_count
) > 0
825 && time_before (jiffies
, timeout
))
828 /* We either got one or timed out -- clear the lock. */
830 smp_call_function_data
= 0;
831 if (atomic_read (&data
.unstarted_count
) > 0)
834 /* Wait for a complete response, if needed. */
836 while (atomic_read (&data
.unfinished_count
) > 0)
/*
 * NOTE(review): fragments of ipi_flush_tlb_all (IPI target; body
 * missing -- presumably tbia()) and of flush_tlb_all, which broadcasts
 * it to every other CPU before/after flushing locally.  The local
 * flush and braces are missing from this excerpt.
 */
844 ipi_flush_tlb_all(void *ignored
)
852 /* Although we don't have any data to pass, we do want to
853 synchronize with the other processors. */
854 if (smp_call_function(ipi_flush_tlb_all
, NULL
, 1, 1)) {
855 printk(KERN_CRIT
"flush_tlb_all: timed out\n");
/*
 * IPI target: flush the local TLB for `mm` if this CPU is currently
 * running it.
 * NOTE(review): fragment -- return type line, braces, and any
 * else-branch are missing from this excerpt.
 */
862 ipi_flush_tlb_mm(void *x
)
864 struct mm_struct
*mm
= (struct mm_struct
*) x
;
865 if (mm
== current
->mm
)
866 flush_tlb_current(mm
);
/*
 * Flush `mm` locally; if it is single-user (count == 1) the early
 * return in the missing lines skips the cross-CPU broadcast, otherwise
 * every other CPU is asked via smp_call_function.
 * NOTE(review): fragment -- return type, braces, the early return, and
 * the non-current-mm else path are missing from this excerpt.
 */
870 flush_tlb_mm(struct mm_struct
*mm
)
872 if (mm
== current
->mm
) {
873 flush_tlb_current(mm
);
874 if (atomic_read(&mm
->count
) == 1)
879 if (smp_call_function(ipi_flush_tlb_mm
, mm
, 1, 1)) {
880 printk(KERN_CRIT
"flush_tlb_mm: timed out\n");
/* Argument bundle passed through smp_call_function for page flushes.
   NOTE(review): the `addr` member read below is missing from this
   excerpt (orig line 887). */
884 struct flush_tlb_page_struct
{
885 struct vm_area_struct
*vma
;
886 struct mm_struct
*mm
;
/*
 * IPI target: flush one page of `mm` locally if this CPU is running it.
 * NOTE(review): fragment -- return type line and braces are missing.
 */
891 ipi_flush_tlb_page(void *x
)
893 struct flush_tlb_page_struct
*data
= (struct flush_tlb_page_struct
*)x
;
894 if (data
->mm
== current
->mm
)
895 flush_tlb_current_page(data
->mm
, data
->vma
, data
->addr
);
/*
 * Flush one page locally, then (unless mm is single-user) broadcast
 * via smp_call_function with a stack-allocated argument bundle.
 * NOTE(review): fragment -- the assignments filling `data`
 * (mm/vma/addr), the early return, and braces are missing from this
 * excerpt.
 */
899 flush_tlb_page(struct vm_area_struct
*vma
, unsigned long addr
)
901 struct flush_tlb_page_struct data
;
902 struct mm_struct
*mm
= vma
->vm_mm
;
904 if (mm
== current
->mm
) {
905 flush_tlb_current_page(mm
, vma
, addr
);
906 if (atomic_read(&mm
->count
) == 1)
915 if (smp_call_function(ipi_flush_tlb_page
, &data
, 1, 1)) {
916 printk(KERN_CRIT
"flush_tlb_page: timed out\n");
/*
 * NOTE(review): fragment -- body missing; per its comment it just
 * delegates to a whole-mm flush (flush_tlb_mm).
 */
921 flush_tlb_range(struct mm_struct
*mm
, unsigned long start
, unsigned long end
)
923 /* On the Alpha we always flush the whole user tlb. */
/*
 * /proc reporting hook: sum per-CPU IPI counts and format a one-line
 * summary into `buffer`; returns the sprintf length.
 * NOTE(review): fragment -- return type, braces, and the `int i`
 * declaration are missing from this excerpt.
 */
929 smp_info(char *buffer
)
932 unsigned long sum
= 0;
933 for (i
= 0; i
< NR_CPUS
; i
++)
934 sum
+= cpu_data
[i
].ipi_count
;
936 return sprintf(buffer
, "CPUs probed %d active %d map 0x%lx IPIs %ld\n",
937 smp_num_probed
, smp_num_cpus
, cpu_present_mask
, sum
);
/*
 * DEBUG_SPINLOCK unlock: clear ownership bookkeeping before releasing.
 * NOTE(review): fragment -- return type, braces, the mb()/lock-word
 * clear, and the on_cpu/task resets are missing from this excerpt.
 */
943 spin_unlock(spinlock_t
* lock
)
949 lock
->previous
= NULL
;
951 lock
->base_file
= "none";
/*
 * DEBUG_SPINLOCK acquire: spin via ldl_l/stl_c asm with a "stuck"
 * countdown; on timeout print the current and previous owner, then
 * record ownership (previous PC, task, file:line) after success.
 * NOTE(review): heavy fragment -- tmp/stuck/printed declarations, the
 * asm template body, the retry goto, and the verbose-path guard are
 * missing from this excerpt.
 */
956 debug_spin_lock(spinlock_t
* lock
, const char *base_file
, int line_no
)
960 void *inline_pc
= __builtin_return_address(0);
961 unsigned long started
= jiffies
;
963 int cpu
= smp_processor_id();
968 /* Use sub-sections to put the actual loop at the end
969 of this object file's text section so as to perfect
970 branch prediction. */
971 __asm__
__volatile__(
979 ".section .text2,\"ax\"\n"
986 : "=r" (tmp
), "=m" (__dummy_lock(lock
)), "=r" (stuck
)
987 : "1" (__dummy_lock(lock
)), "2" (stuck
));
991 "%s:%d spinlock stuck in %s at %p(%d)"
992 " owner %s at %p(%d) %s:%d\n",
994 current
->comm
, inline_pc
, cpu
,
995 lock
->task
->comm
, lock
->previous
,
996 lock
->on_cpu
, lock
->base_file
, lock
->line_no
);
1002 /* Exiting. Got the lock. */
/* Record who holds the lock for the next "stuck" report. */
1004 lock
->previous
= inline_pc
;
1005 lock
->task
= current
;
1006 lock
->base_file
= base_file
;
1007 lock
->line_no
= line_no
;
1011 "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
1012 base_file
, line_no
, current
->comm
, inline_pc
,
1013 cpu
, jiffies
- started
);
/*
 * DEBUG_SPINLOCK trylock: one test_and_set_bit attempt; on success
 * record the same ownership bookkeeping as debug_spin_lock.
 * NOTE(review): fragment -- return type, braces, the `int ret`
 * declaration, and the `return ret` are missing from this excerpt.
 */
1018 debug_spin_trylock(spinlock_t
* lock
, const char *base_file
, int line_no
)
1021 if ((ret
= !test_and_set_bit(0, lock
))) {
1022 lock
->on_cpu
= smp_processor_id();
1023 lock
->previous
= __builtin_return_address(0);
1024 lock
->task
= current
;
1026 lock
->base_file
= base_file
;
1027 lock
->line_no
= line_no
;
1031 #endif /* DEBUG_SPINLOCK */
/*
 * DEBUG_RWLOCK writer acquire: asm loop with two stuck countdowns --
 * one for the lock word, one for draining readers -- warning on each.
 * NOTE(review): fragment -- regx/regy declarations, stuck_lock
 * initialization, most of the asm template, and the retry gotos are
 * missing from this excerpt.
 */
1034 void write_lock(rwlock_t
* lock
)
1037 int stuck_lock
, stuck_reader
;
1038 void *inline_pc
= __builtin_return_address(0);
1043 stuck_reader
= 1<<26;
1045 __asm__
__volatile__(
1053 ".section .text2,\"ax\"\n"
1054 "6: blt %3,4b # debug\n"
1055 " subl %3,1,%3 # debug\n"
1058 "8: blt %4,4b # debug\n"
1059 " subl %4,1,%4 # debug\n"
1064 : "=m" (__dummy_lock(lock
)), "=&r" (regx
), "=&r" (regy
),
1065 "=&r" (stuck_lock
), "=&r" (stuck_reader
)
1066 : "0" (__dummy_lock(lock
)), "3" (stuck_lock
), "4" (stuck_reader
));
1068 if (stuck_lock
< 0) {
1069 printk(KERN_WARNING
"write_lock stuck at %p\n", inline_pc
);
1072 if (stuck_reader
< 0) {
1073 printk(KERN_WARNING
"write_lock stuck on readers at %p\n",
/*
 * DEBUG_RWLOCK reader acquire: asm loop with a single stuck countdown
 * on the lock word, warning when it expires.
 * NOTE(review): fragment -- regx/stuck_lock declarations, stuck_lock
 * initialization, most of the asm template, and the retry goto are
 * missing from this excerpt.
 */
1079 void read_lock(rwlock_t
* lock
)
1083 void *inline_pc
= __builtin_return_address(0);
1089 __asm__
__volatile__(
1096 ".section .text2,\"ax\"\n"
1098 " blt %2,4b # debug\n"
1099 " subl %2,1,%2 # debug\n"
1103 : "=m" (__dummy_lock(lock
)), "=&r" (regx
), "=&r" (stuck_lock
)
1104 : "0" (__dummy_lock(lock
)), "2" (stuck_lock
))
;
1106 if (stuck_lock
< 0) {
1107 printk(KERN_WARNING
"read_lock stuck at %p\n", inline_pc
);
1111 #endif /* DEBUG_RWLOCK */