/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
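
/* Each CPU owns one cache-line-aligned word of pending-IPI bits,
   indexed by enum ipi_message_type below.  Senders set bits with
   set_bit(); the receiving CPU claims the whole word atomically with
   xchg() in handle_ipi(), so no spinlock is needed.  */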

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __devinitdata = 0;

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}
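
/* Boot handshake, as used above and in smp_boot_one_cpu() below:
   the boot CPU sets smp_secondary_alive to -1 before starting a
   secondary, drops it to 0 once the console has acked, and the
   secondary sets it to 1 when it has finished calibrating.  */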

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_online(cpuid)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}
	set_cpu_online(cpuid, true);

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin)
		alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait boot CPU to stop with irq enabled before running
	   calibrate_delay. */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);

	/* Allow master to continue only after we have written loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __devinit
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __cpuinit
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}
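
/* Protocol notes, inferred from the code above: the sender stores the
   message length in ipc_buffer[0] and the text right after it, then
   sets this cpuid's bit in hwrpb->rxrdy as a doorbell for the console.
   hwrpb->txrdy is the reverse channel; see recv_secondary_console_msg()
   below.  */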

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		  ((char*)hwrpb
		   + hwrpb->processor_offset
		   + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __cpuinit
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}
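
/* Presumably __smp_callin is an assembly entry stub: SRM loads the
   HWPCB built above and jumps to hwrpb->CPU_restart, and the stub
   switches to the idle task's PCB before entering smp_callin().  */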

/*
 * Bring one cpu online.
 */
static int __cpuinit
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */
	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				set_cpu_possible(i, true);
				set_cpu_present(i, true);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
	       smp_num_probed, cpu_present_map.bits[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		init_cpu_possible(cpumask_of(boot_cpuid));
		init_cpu_present(cpumask_of(boot_cpuid));
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void __devinit
smp_prepare_boot_cpu(void)
{
}

int __cpuinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for(cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;
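
	/* bogosum counts loops per jiffy; 500000 loops per second is one
	   BogoMIPS, so the printk below reports roughly
	   bogosum * HZ / 500000, to two decimal places.  */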
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}

void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	old_regs = set_irq_regs(regs);

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
	set_irq_regs(old_regs);
}

int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu(i, to_whom)
		wripir(i);
}
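
/* The first mb() above publishes the sender's data before the request
   bit is visible; the second orders the bit against the interprocessor
   interrupt raised by wripir().  Both pair with the mb() calls in
   handle_ipi() below.  */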

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

		which = ops & -ops;
		ops &= ~which;
		which = __ffs(which);
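
		/* The three lines above pop the lowest-numbered pending
		   message: ops & -ops isolates the lowest set bit,
		   ops &= ~which clears it from the batch, and __ffs()
		   turns the one-bit mask into a bit index.  */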

		switch (which) {
		case IPI_RESCHEDULE:
			/* Reschedule callback.  Everything to be done
			   is done by the interrupt return path.  */
			break;
		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;
		case IPI_CALL_FUNC_SINGLE:
			generic_smp_call_function_single_interrupt();
			break;
		case IPI_CPU_STOP:
			halt();
		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(&to_whom, IPI_CPU_STOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for other processors to flush their icaches before
	   continuing. */
	if (on_each_cpu(ipi_imb, NULL, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
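
/* Shootdown strategy for the flush_* routines below: when the mm is
   active only on the local CPU, skip the cross-call and instead zero
   mm->context[cpu] for every other CPU.  That forces those CPUs to
   allocate a fresh ASN -- and thus drop any stale TLB entries -- the
   next time they activate this mm.  */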

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}