/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/timer.h>
#include <asm/irq_regs.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/sections.h>
#include <asm/mdesc.h>
#include <asm/hypervisor.h>
int sparc64_multi_core __read_mostly;

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}
extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;
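/* Entry point for a freshly started secondary cpu: set up its per-cpu
 * offset, timer and caches, attach to init_mm, then wait until the
 * master cpu adds it to smp_commenced_mask before marking itself online.
 */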
void __cpuinit smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}
/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
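/* Slave side of the tick synchronization handshake: measure the offset
 * from the master's tick over NUM_ROUNDS rounds and apply a damped
 * adjustment via tick_ops->add_tick().
 */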
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0)
				done = 1;	/* let's lock on to this... */

			if (!done) {
				adjust_latency += -delta;
				adj = -delta + adjust_latency/4;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}
static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;
	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
	unsigned long val = (unsigned long) p;

	return kern_base + (val - KERNBASE);
}
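/* Start a cpu via the sun4v hypervisor on LDOM platforms: build an
 * hvtramp_descr describing the locked kernel TTE mappings and the cpu's
 * fault status area, then pass its real address to sun4v_cpu_start()
 * along with the real address of the hypervisor trampoline.
 */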
static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte   = tte_data;
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif
extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;
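/* Boot one secondary cpu: point cpu_new_thread at the idle task's
 * thread_info, kick the cpu off via the hypervisor or OBP, then wait
 * for it to raise callin_flag in smp_callin().
 */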
static int __cpuinit smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	void *descr = NULL;
	int timeout, ret;

	callin_flag = 0;
	cpu_new_thread = task_thread_info(idle);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}
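/* Send one interrupt vector (data0/data1/data2) to a single cpu using
 * the Spitfire UDB interrupt dispatch registers, retrying on NACK.
 */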
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);

	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}
static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}
/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, n_sent = 0;

					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						n_sent++;
						if (n_sent == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));
		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}
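/* On sun4v the hypervisor accepts the whole cpu list in one call and
 * marks each successfully-notified entry with 0xffff, so delivery is
 * simply retried until every entry is marked or a fatal error occurs.
 */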
/* Multi-cpu list version.  */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int retries, this_cpu, prev_sent, i, saw_cpu_error;
	unsigned long status;
	u16 *cpu_list;

	this_cpu = smp_processor_id();

	cpu_list = __va(tb->cpu_list_pa);

	saw_cpu_error = 0;
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err == HV_CPU_STATE_ERROR) {
					saw_cpu_error = (cpu + 1);
					cpu_list[i] = 0xffff;
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	if (unlikely(saw_cpu_error))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "(including %d) were in error state\n",
	       this_cpu, saw_cpu_error - 1);
	return;

fatal_mondo_timeout:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       " progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
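/* Common front end for all cross call delivery: copy the mondo data and
 * the target cpu list into this cpu's trap block with interrupts off,
 * then hand off to the per-cpu-type delivery routine selected at boot.
 */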
static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}
/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_interrupt();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_single_interrupt();
}
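/* Reload this cpu's TSB state if it is currently running the mm passed
 * in @info; used by smp_tsb_sync() after an address space's TSB changes.
 */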
static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
extern unsigned long xcall_kgdb_capture;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif
static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
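/* Flush a page from the D-cache of one particular cpu: locally if it is
 * the current cpu, otherwise via a dcache flush cross call.  sun4v does
 * not need this and returns early.
 */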
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}
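/* Same as smp_flush_dcache_page_impl(), but broadcasts the flush to all
 * online cpus and also flushes the page locally.
 */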
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}
void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

void smp_fetch_global_pmu(void)
{
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
	else
		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}
/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that processes tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 *    The performance gain from "optimizing" away the cross call for threads is
 *    questionable (in theory the big win for threads is the massive sharing of
 *    address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm_cpumask(mm));

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
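/* smp_capture() parks every other online cpu in a spin loop (the
 * penguin "jail") so that the caller can run with the machine quiesced;
 * smp_release() lets them continue.
 */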
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}
/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}
/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void __devinit smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}
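/* Build cpu_core_map and cpu_sibling_map for every present cpu from the
 * core_id and proc_id values discovered during cpu probing.
 */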
void __devinit smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = smp_boot_one_cpu(cpu, tidle);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	/* Make sure no interrupts point to this cpu.  */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	set_cpu_online(cpu, false);

	return 0;
}
void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (!limit) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif
void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}
/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node(NODE_DATA(node),
					   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem(size, align, goal);
#endif
}
static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}
static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
}
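/* Boot-time percpu setup: try the embed first-chunk allocator, fall back
 * to page-sized units, then record each cpu's __per_cpu_offset and fill
 * in cpu data from the device tree or machine description.
 */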
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu.  */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}