/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
extern void calibrate_delay(void);

/* Please don't make this stuff initdata!!!  --DaveM */
static unsigned char boot_cpu_id;

cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;
void smp_info(struct seq_file *m)
{
        int i;

        seq_printf(m, "State:\n");
        for_each_online_cpu(i)
                seq_printf(m, "CPU%d:\t\tonline\n", i);
}
void smp_bogo(struct seq_file *m)
{
        int i;

        for_each_online_cpu(i)
                seq_printf(m,
                           "Cpu%dBogo\t: %lu.%02lu\n"
                           "Cpu%dClkTck\t: %016lx\n",
                           i, cpu_data(i).udelay_val / (500000/HZ),
                           (cpu_data(i).udelay_val / (5000/HZ)) % 100,
                           i, cpu_data(i).clock_tick);
}
void __init smp_store_cpu_info(int id)
{
        struct device_node *dp;
        int def;

        /* multiplier and counter set by
           smp_setup_percpu_timer()  */
        cpu_data(id).udelay_val = loops_per_jiffy;

        cpu_find_by_mid(id, &dp);
        cpu_data(id).clock_tick =
                of_getintprop_default(dp, "clock-frequency", 0);

        def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024));
        cpu_data(id).dcache_size =
                of_getintprop_default(dp, "dcache-size", def);

        def = 32;
        cpu_data(id).dcache_line_size =
                of_getintprop_default(dp, "dcache-line-size", def);

        def = 16 * 1024;
        cpu_data(id).icache_size =
                of_getintprop_default(dp, "icache-size", def);

        def = 32;
        cpu_data(id).icache_line_size =
                of_getintprop_default(dp, "icache-line-size", def);

        def = ((tlb_type == hypervisor) ?
               (3 * 1024 * 1024) :
               (4 * 1024 * 1024));
        cpu_data(id).ecache_size =
                of_getintprop_default(dp, "ecache-size", def);

        def = 64;
        cpu_data(id).ecache_line_size =
                of_getintprop_default(dp, "ecache-line-size", def);

        printk("CPU[%d]: Caches "
               "D[sz(%d):line_sz(%d)] "
               "I[sz(%d):line_sz(%d)] "
               "E[sz(%d):line_sz(%d)]\n",
               id,
               cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
               cpu_data(id).icache_size, cpu_data(id).icache_line_size,
               cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
}
static void smp_setup_percpu_timer(void);

static volatile unsigned long callin_flag = 0;
void __init smp_callin(void)
{
        int cpuid = hard_smp_processor_id();

        __local_per_cpu_offset = __per_cpu_offset(cpuid);

        if (tlb_type == hypervisor)
                sun4v_ktsb_register();

        __flush_tlb_all();

        smp_setup_percpu_timer();

        if (cheetah_pcache_forced_on)
                cheetah_enable_pcache();

        local_irq_enable();

        calibrate_delay();
        smp_store_cpu_info(cpuid);
        callin_flag = 1;
        __asm__ __volatile__("membar #Sync\n\t"
                             "flush  %%g6" : : : "memory");

        /* Clear this or we will die instantly when we
         * schedule back to this idler...
         */
        current_thread_info()->new_child = 0;

        /* Attach to the address space of init_task. */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        while (!cpu_isset(cpuid, smp_commenced_mask))
                rmb();

        cpu_set(cpuid, cpu_online_map);

        /* idle thread is expected to have preempt disabled */
        preempt_disable();
}

void cpu_panic(void)
{
        printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
        panic("SMP bolixed\n");
}
static unsigned long current_tick_offset __read_mostly;

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER  0
#define SLAVE   (SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS      64      /* magic value */
#define NUM_ITERS       5       /* likewise */
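/* Shape of one measurement round, sketched for orientation (this diagram is
 * an added note, not code from the original): get_delta() runs on the
 * freshly booted cpu, the loop in smp_synchronize_one_tick() runs on the
 * master, and the two rendezvous through the go[] array, with go[MASTER]
 * and go[SLAVE] kept in different cache lines (hence the SLAVE offset).
 *
 *      new cpu (get_delta)                     master
 *      -------------------                     ------
 *      t0 = get_tick();
 *      go[MASTER] = 1;         ---->           sees go[MASTER] != 0,
 *      while (!(tm = go[SLAVE]))               clears go[MASTER],
 *              rmb();          <----           go[SLAVE] = get_tick();
 *      t1 = get_tick();
 *
 * The master's sample tm is thus bracketed by [t0, t1] on the new cpu, and
 * the round with the smallest t1 - t0 gives the tightest error bound.
 */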
static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC 0
static inline long get_delta (long *rt, long *master)
{
        unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
        unsigned long tcenter, t0, t1, tm;
        unsigned long i;

        for (i = 0; i < NUM_ITERS; i++) {
                t0 = tick_ops->get_tick();
                go[MASTER] = 1;
                membar_storeload();
                while (!(tm = go[SLAVE]))
                        rmb();
                go[SLAVE] = 0;
                wmb();
                t1 = tick_ops->get_tick();

                if (t1 - t0 < best_t1 - best_t0)
                        best_t0 = t0, best_t1 = t1, best_tm = tm;
        }

        *rt = best_t1 - best_t0;
        *master = best_tm - best_t0;

        /* average best_t0 and best_t1 without overflow: */
        tcenter = (best_t0/2 + best_t1/2);
        if (best_t0 % 2 + best_t1 % 2 == 2)
                tcenter++;
        return tcenter - best_tm;
}
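/* Worked example of the overflow-safe midpoint above (added for
 * illustration): for unsigned a and b, (a + b) / 2 can wrap, but
 * a/2 + b/2 cannot, and the only rounding loss happens when both
 * operands are odd, e.g.
 *
 *      a = 7, b = 9:   a/2 + b/2 = 3 + 4 = 7, both odd -> tcenter++ -> 8
 *
 * which matches (7 + 9) / 2 without ever forming the possibly-wrapping sum.
 */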
void smp_synchronize_tick_client(void)
{
        long i, delta, adj, adjust_latency = 0, done = 0;
        unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
        struct {
                long rt;        /* roundtrip time */
                long master;    /* master's timestamp */
                long diff;      /* difference between midpoint and master's timestamp */
                long lat;       /* estimate of itc adjustment latency */
        } t[NUM_ROUNDS];
#endif

        go[MASTER] = 1;

        while (go[MASTER])
                rmb();

        local_irq_save(flags);
        {
                for (i = 0; i < NUM_ROUNDS; i++) {
                        delta = get_delta(&rt, &master_time_stamp);
                        if (delta == 0) {
                                done = 1;       /* let's lock on to this... */
                                bound = rt;
                        }

                        if (!done) {
                                if (i > 0) {
                                        adjust_latency += -delta;
                                        adj = -delta + adjust_latency/4;
                                } else
                                        adj = -delta;

                                tick_ops->add_tick(adj, current_tick_offset);
                        }
#if DEBUG_TICK_SYNC
                        t[i].rt = rt;
                        t[i].master = master_time_stamp;
                        t[i].diff = delta;
                        t[i].lat = adjust_latency/4;
#endif
                }
        }
        local_irq_restore(flags);

#if DEBUG_TICK_SYNC
        for (i = 0; i < NUM_ROUNDS; i++)
                printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
                       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

        printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
               "(last diff %ld cycles, maxerr %lu cycles)\n",
               smp_processor_id(), delta, rt);
}
static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
        unsigned long flags, i;

        go[MASTER] = 0;

        smp_start_sync_tick_client(cpu);

        /* wait for client to be ready */
        while (!go[MASTER])
                rmb();

        /* now let the client proceed into his loop */
        go[MASTER] = 0;
        membar_storeload();

        spin_lock_irqsave(&itc_sync_lock, flags);
        {
                for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
                        while (!go[MASTER])
                                rmb();
                        go[MASTER] = 0;
                        wmb();
                        go[SLAVE] = tick_ops->get_tick();
                        membar_storeload();
                }
        }
        spin_unlock_irqrestore(&itc_sync_lock, flags);
}
extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines.  -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;
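/* In other words (an added illustrative note): OBP is handed the 32-bit-safe
 * address of cpu_new_thread itself, and the startup trampoline recovers the
 * full 64-bit thread_info pointer by dereferencing that slot, roughly
 *
 *      new_ti = *(struct thread_info **) cookie;
 *
 * so only &cpu_new_thread has to fit in 32 bits, not the thread_info it
 * points at.
 */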
static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
        unsigned long entry =
                (unsigned long)(&sparc64_cpu_startup);
        unsigned long cookie =
                (unsigned long)(&cpu_new_thread);
        struct task_struct *p;
        int timeout, ret;

        p = fork_idle(cpu);
        callin_flag = 0;
        cpu_new_thread = task_thread_info(p);
        cpu_set(cpu, cpu_callout_map);

        if (tlb_type == hypervisor) {
                /* Alloc the mondo queues, cpu will load them.  */
                sun4v_init_mondo_queues(0, cpu, 1, 0);

                prom_startcpu_cpuid(cpu, entry, cookie);
        } else {
                struct device_node *dp;

                cpu_find_by_mid(cpu, &dp);
                prom_startcpu(dp->node, entry, cookie);
        }

        for (timeout = 0; timeout < 5000000; timeout++) {
                if (callin_flag)
                        break;
                udelay(100);
        }

        if (callin_flag) {
                ret = 0;
        } else {
                printk("Processor %d is stuck.\n", cpu);
                cpu_clear(cpu, cpu_callout_map);
                ret = -ENODEV;
        }
        cpu_new_thread = NULL;

        return ret;
}
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
        u64 result, target;
        int stuck, tmp;

        if (this_is_starfire) {
                /* map to real upaid */
                cpu = (((cpu & 0x3c) << 1) |
                        ((cpu & 0x40) >> 4) |
                        (cpu & 0x3));
        }

        target = (cpu << 14) | 0x70;
again:
        /* Ok, this is the real Spitfire Errata #54.
         * One must read back from a UDB internal register
         * after writes to the UDB interrupt dispatch, but
         * before the membar Sync for that write.
         * So we use the high UDB control register (ASI 0x7f,
         * ADDR 0x20) for the dummy read.  -DaveM
         */
        tmp = 0x40;
        __asm__ __volatile__(
        "wrpr   %1, %2, %%pstate\n\t"
        "stxa   %4, [%0] %3\n\t"
        "stxa   %5, [%0+%8] %3\n\t"
        "add    %0, %8, %0\n\t"
        "stxa   %6, [%0+%8] %3\n\t"
        "membar #Sync\n\t"
        "stxa   %%g0, [%7] %3\n\t"
        "membar #Sync\n\t"
        "mov    0x20, %%g1\n\t"
        "ldxa   [%%g1] 0x7f, %%g0\n\t"
        "membar #Sync"
        : "=r" (tmp)
        : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
          "r" (data0), "r" (data1), "r" (data2), "r" (target),
          "r" (0x10), "0" (tmp)
        : "g1");

        /* NOTE: PSTATE_IE is still clear. */
        stuck = 100000;
        do {
                __asm__ __volatile__("ldxa [%%g0] %1, %0"
                        : "=r" (result)
                        : "i" (ASI_INTR_DISPATCH_STAT));
                if (result == 0) {
                        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                             : : "r" (pstate));
                        return;
                }
                stuck -= 1;
                if (stuck == 0)
                        break;
        } while (result & 0x1);
        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                             : : "r" (pstate));
        if (stuck == 0) {
                printk("CPU[%d]: mondo stuckage result[%016lx]\n",
                       smp_processor_id(), result);
        } else {
                udelay(2);
                goto again;
        }
}
static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
        u64 pstate;
        int i;

        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
        for_each_cpu_mask(i, mask)
                spitfire_xcall_helper(data0, data1, data2, pstate, i);
}
/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
        u64 pstate, ver;
        int nack_busy_id, is_jbus;

        if (cpus_empty(mask))
                return;

        /* Unfortunately, someone at Sun had the brilliant idea to make the
         * busy/nack fields hard-coded by ITID number for this Ultra-III
         * derivative processor.
         */
        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
        is_jbus = ((ver >> 32) == __JALAPENO_ID ||
                   (ver >> 32) == __SERRANO_ID);

        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
        __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
                             : : "r" (pstate), "i" (PSTATE_IE));

        /* Setup the dispatch data registers. */
        __asm__ __volatile__("stxa %0, [%3] %6\n\t"
                             "stxa %1, [%4] %6\n\t"
                             "stxa %2, [%5] %6\n\t"
                             "membar #Sync\n\t"
                             : /* no outputs */
                             : "r" (data0), "r" (data1), "r" (data2),
                               "r" (0x40), "r" (0x50), "r" (0x60),
                               "i" (ASI_INTR_W));

        nack_busy_id = 0;
        {
                int i;

                for_each_cpu_mask(i, mask) {
                        u64 target = (i << 14) | 0x70;

                        if (!is_jbus)
                                target |= (nack_busy_id << 24);
                        __asm__ __volatile__(
                                "stxa   %%g0, [%0] %1\n\t"
                                "membar #Sync\n\t"
                                : /* no outputs */
                                : "r" (target), "i" (ASI_INTR_W));
                        nack_busy_id++;
                }
        }

        /* Now, poll for completion. */
        {
                u64 dispatch_stat;
                long stuck;

                stuck = 100000 * nack_busy_id;
                do {
                        __asm__ __volatile__("ldxa [%%g0] %1, %0"
                                             : "=r" (dispatch_stat)
                                             : "i" (ASI_INTR_DISPATCH_STAT));
                        if (dispatch_stat == 0UL) {
                                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                                     : : "r" (pstate));
                                return;
                        }
                        if (!--stuck)
                                break;
                } while (dispatch_stat & 0x5555555555555555UL);

                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                     : : "r" (pstate));

                if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
                        /* Busy bits will not clear, continue instead
                         * of freezing up on this cpu.
                         */
                        printk("CPU[%d]: mondo stuckage result[%016lx]\n",
                               smp_processor_id(), dispatch_stat);
                } else {
                        int i, this_busy_nack = 0;

                        /* Delay some random time with interrupts enabled
                         * to prevent deadlock.
                         */
                        udelay(2 * nack_busy_id);

                        /* Clear out the mask bits for cpus which did not
                         * NACK us.
                         */
                        for_each_cpu_mask(i, mask) {
                                u64 check_mask;

                                if (is_jbus)
                                        check_mask = (0x2UL << (2*i));
                                else
                                        check_mask = (0x2UL <<
                                                      this_busy_nack);
                                if ((dispatch_stat & check_mask) == 0)
                                        cpu_clear(i, mask);
                                this_busy_nack += 2;
                        }

                        goto retry;
                }
        }
}
/* Multi-cpu list version.  */
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
        struct trap_per_cpu *tb;
        u16 *cpu_list;
        u64 *mondo;
        cpumask_t error_mask;
        unsigned long flags, status;
        int cnt, retries, this_cpu, prev_sent, i;

        /* We have to do this whole thing with interrupts fully disabled.
         * Otherwise if we send an xcall from interrupt context it will
         * corrupt both our mondo block and cpu list state.
         *
         * One consequence of this is that we cannot use timeout mechanisms
         * that depend upon interrupts being delivered locally.  So, for
         * example, we cannot sample jiffies and expect it to advance.
         *
         * Fortunately, udelay() uses %stick/%tick so we can use that.
         */
        local_irq_save(flags);

        this_cpu = smp_processor_id();
        tb = &trap_block[this_cpu];

        mondo = __va(tb->cpu_mondo_block_pa);
        mondo[0] = data0;
        mondo[1] = data1;
        mondo[2] = data2;
        wmb();

        cpu_list = __va(tb->cpu_list_pa);

        /* Setup the initial cpu list.  */
        cnt = 0;
        for_each_cpu_mask(i, mask)
                cpu_list[cnt++] = i;

        cpus_clear(error_mask);
        retries = 0;
        prev_sent = 0;
        do {
                int forward_progress, n_sent;

                status = sun4v_cpu_mondo_send(cnt,
                                              tb->cpu_list_pa,
                                              tb->cpu_mondo_block_pa);

                /* HV_EOK means all cpus received the xcall, we're done.  */
                if (likely(status == HV_EOK))
                        break;

                /* First, see if we made any forward progress.
                 *
                 * The hypervisor indicates successful sends by setting
                 * cpu list entries to the value 0xffff.
                 */
                n_sent = 0;
                for (i = 0; i < cnt; i++) {
                        if (likely(cpu_list[i] == 0xffff))
                                n_sent++;
                }

                forward_progress = 0;
                if (n_sent > prev_sent)
                        forward_progress = 1;

                prev_sent = n_sent;

                /* If we get a HV_ECPUERROR, then one or more of the cpus
                 * in the list are in error state.  Use the cpu_state()
                 * hypervisor call to find out which cpus are in error state.
                 */
                if (unlikely(status == HV_ECPUERROR)) {
                        for (i = 0; i < cnt; i++) {
                                long err;
                                u16 cpu;

                                cpu = cpu_list[i];
                                if (cpu == 0xffff)
                                        continue;

                                err = sun4v_cpu_state(cpu);
                                if (err >= 0 &&
                                    err == HV_CPU_STATE_ERROR) {
                                        cpu_list[i] = 0xffff;
                                        cpu_set(cpu, error_mask);
                                }
                        }
                } else if (unlikely(status != HV_EWOULDBLOCK))
                        goto fatal_mondo_error;

                /* Don't bother rewriting the CPU list, just leave the
                 * 0xffff and non-0xffff entries in there and the
                 * hypervisor will do the right thing.
                 *
                 * Only advance timeout state if we didn't make any
                 * forward progress.
                 */
                if (unlikely(!forward_progress)) {
                        if (unlikely(++retries > 10000))
                                goto fatal_mondo_timeout;

                        /* Delay a little bit to let other cpus catch up
                         * on their cpu mondo queue work.
                         */
                        udelay(2 * cnt);
                }
        } while (1);

        local_irq_restore(flags);

        if (unlikely(!cpus_empty(error_mask)))
                goto fatal_mondo_cpu_error;

        return;

fatal_mondo_cpu_error:
        printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
               "were in error state\n",
               this_cpu);
        printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
        for_each_cpu_mask(i, error_mask)
                printk("%d ", i);
        printk("]\n");
        return;

fatal_mondo_timeout:
        local_irq_restore(flags);
        printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
               "progress after %d retries.\n",
               this_cpu, retries);
        goto dump_cpu_list_and_out;

fatal_mondo_error:
        local_irq_restore(flags);
        printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
               this_cpu, status);
        printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
               "mondo_block_pa(%lx)\n",
               this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
        printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
        for (i = 0; i < cnt; i++)
                printk("%u ", cpu_list[i]);
        printk("]\n");
}
/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
        u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
        int this_cpu = get_cpu();

        cpus_and(mask, mask, cpu_online_map);
        cpu_clear(this_cpu, mask);

        if (tlb_type == spitfire)
                spitfire_xcall_deliver(data0, data1, data2, mask);
        else if (tlb_type == cheetah || tlb_type == cheetah_plus)
                cheetah_xcall_deliver(data0, data1, data2, mask);
        else
                hypervisor_xcall_deliver(data0, data1, data2, mask);
        /* NOTE: Caller runs local copy on master. */

        put_cpu();
}
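/* Layout of data0 above, spelled out as a quick reference (derived from the
 * expression rather than documented separately, so treat it as an added
 * note):
 *
 *      bits 63..32     TLB context number (ctx)
 *      bits 31..0      low 32 bits of the xcall handler's address (func)
 *
 * Packing the handler address into 32 bits works because the xcall
 * trampolines live in the kernel's low 32-bit addressable text.
 */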
extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
        cpumask_t mask = cpumask_of_cpu(cpu);

        smp_cross_call_masked(&xcall_sync_tick,
                              0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
        smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t finished;
        int wait;
};

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int smp_call_function_mask(void (*func)(void *info), void *info,
                                  int nonatomic, int wait, cpumask_t mask)
{
        struct call_data_struct data;
        int cpus;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.finished, 0);
        data.wait = wait;

        spin_lock(&call_lock);

        cpu_clear(smp_processor_id(), mask);
        cpus = cpus_weight(mask);
        if (!cpus)
                goto out_unlock;

        call_data = &data;
        mb();

        smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

        /* Wait for response */
        while (atomic_read(&data.finished) != cpus)
                cpu_relax();

out_unlock:
        spin_unlock(&call_lock);

        return 0;
}
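/* Typical use, as a sketch (the entry points are the real ones above, but
 * the callback body here is hypothetical):
 *
 *      static void bump(void *info)
 *      {
 *              atomic_inc((atomic_t *) info);
 *      }
 *
 *      smp_call_function(bump, &counter, 0, 1);
 *
 * runs bump() on every other online cpu and, because wait == 1, returns
 * only after each target has finished executing it.
 */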
int smp_call_function(void (*func)(void *info), void *info,
                      int nonatomic, int wait)
{
        return smp_call_function_mask(func, info, nonatomic, wait,
                                      cpu_online_map);
}
void smp_call_function_client(int irq, struct pt_regs *regs)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;

        clear_softint(1 << irq);
        if (call_data->wait) {
                /* let initiator proceed only after completion */
                func(info);
                atomic_inc(&call_data->finished);
        } else {
                /* let initiator proceed after getting data */
                atomic_inc(&call_data->finished);
                func(info);
        }
}
static void tsb_sync(void *info)
{
        struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
        struct mm_struct *mm = info;

        /* It is not valid to test "current->active_mm == mm" here.
         *
         * The value of "current" is not changed atomically with
         * switch_mm().  But that's OK, we just need to check the
         * current cpu's trap block PGD physical address.
         */
        if (tp->pgd_paddr == __pa(mm->pgd))
                tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
        smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
}
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif
static __inline__ void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
        __flush_dcache_page(page_address(page),
                            ((tlb_type == spitfire) &&
                             page_mapping(page) != NULL));
#else
        if (page_mapping(page) != NULL &&
            tlb_type == spitfire)
                __flush_icache_page(__pa(page_address(page)));
#endif
}
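/* Aside (an added explanatory note, not from the original): the
 * DCACHE_ALIASING_POSSIBLE case arises because the virtually-indexed L1
 * D-cache spans more than one page per way, so two different virtual
 * mappings of the same physical page can land on different cache lines;
 * that aliasing is what forces the explicit flush above.
 */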
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
        cpumask_t mask = cpumask_of_cpu(cpu);
        int this_cpu;

        if (tlb_type == hypervisor)
                return;

#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif

        this_cpu = get_cpu();

        if (cpu == this_cpu) {
                __local_flush_dcache_page(page);
        } else if (cpu_online(cpu)) {
                void *pg_addr = page_address(page);
                u64 data0;

                if (tlb_type == spitfire) {
                        data0 =
                                ((u64)&xcall_flush_dcache_page_spitfire);
                        if (page_mapping(page) != NULL)
                                data0 |= ((u64)1 << 32);
                        spitfire_xcall_deliver(data0,
                                               __pa(pg_addr),
                                               (u64) pg_addr,
                                               mask);
                } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
                        data0 =
                                ((u64)&xcall_flush_dcache_page_cheetah);
                        cheetah_xcall_deliver(data0,
                                              __pa(pg_addr),
                                              0, mask);
#endif
                }
#ifdef CONFIG_DEBUG_DCFLUSH
                atomic_inc(&dcpage_flushes_xcall);
#endif
        }

        put_cpu();
}
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
        void *pg_addr = page_address(page);
        cpumask_t mask = cpu_online_map;
        u64 data0;
        int this_cpu;

        if (tlb_type == hypervisor)
                return;

        this_cpu = get_cpu();

        cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif
        if (cpus_empty(mask))
                goto flush_self;
        if (tlb_type == spitfire) {
                data0 = ((u64)&xcall_flush_dcache_page_spitfire);
                if (page_mapping(page) != NULL)
                        data0 |= ((u64)1 << 32);
                spitfire_xcall_deliver(data0,
                                       __pa(pg_addr),
                                       (u64) pg_addr,
                                       mask);
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
                data0 = ((u64)&xcall_flush_dcache_page_cheetah);
                cheetah_xcall_deliver(data0,
                                      __pa(pg_addr),
                                      0, mask);
#endif
        }
#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes_xcall);
#endif
 flush_self:
        __local_flush_dcache_page(page);

        put_cpu();
}
static void __smp_receive_signal_mask(cpumask_t mask)
{
        smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
}

void smp_receive_signal(int cpu)
{
        cpumask_t mask = cpumask_of_cpu(cpu);

        if (cpu_online(cpu))
                __smp_receive_signal_mask(mask);
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
        clear_softint(1 << irq);
}
void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
        struct mm_struct *mm;
        unsigned long flags;

        clear_softint(1 << irq);

        /* See if we need to allocate a new TLB context because
         * the version of the one we are using is now out of date.
         */
        mm = current->active_mm;
        if (unlikely(!mm || (mm == &init_mm)))
                return;

        spin_lock_irqsave(&mm->context.lock, flags);

        if (unlikely(!CTX_VALID(mm->context)))
                get_new_mmu_context(mm);

        spin_unlock_irqrestore(&mm->context.lock, flags);

        load_secondary_context(mm);
        __flush_tlb_mm(CTX_HWBITS(mm->context),
                       SECONDARY_CONTEXT);
}
void smp_new_mmu_context_version(void)
{
        smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

void smp_report_regs(void)
{
        smp_cross_call(&xcall_report_regs, 0, 0, 0);
}
/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 *    The performance gain from "optimizing" away the cross call for threads is
 *    questionable (in theory the big win for threads is the massive sharing of
 *    address space state across processors).
 */
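/* A concrete walk-through of point 2 (added for illustration, the scenario
 * is hypothetical): a single-threaded task runs on cpu 0, so cpu_vm_mask is
 * just {0} and smp_flush_tlb_mm() below flushes only locally.  If the
 * scheduler later migrates the task to cpu 1, cpu 1 finds its bit clear in
 * cpu_vm_mask and flushes the stale context from its own tlb before use,
 * preserving coherency without a single cross call.
 */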
/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        u32 ctx = CTX_HWBITS(mm->context);
        int cpu = get_cpu();

        if (atomic_read(&mm->mm_users) == 1) {
                mm->cpu_vm_mask = cpumask_of_cpu(cpu);
                goto local_flush_and_out;
        }

        smp_cross_call_masked(&xcall_flush_tlb_mm,
                              ctx, 0, 0,
                              mm->cpu_vm_mask);

local_flush_and_out:
        __flush_tlb_mm(ctx, SECONDARY_CONTEXT);

        put_cpu();
}
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
        u32 ctx = CTX_HWBITS(mm->context);
        int cpu = get_cpu();

        if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
                mm->cpu_vm_mask = cpumask_of_cpu(cpu);
        else
                smp_cross_call_masked(&xcall_flush_tlb_pending,
                                      ctx, nr, (unsigned long) vaddrs,
                                      mm->cpu_vm_mask);

        __flush_tlb_pending(ctx, nr, vaddrs);

        put_cpu();
}
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        start &= PAGE_MASK;
        end    = PAGE_ALIGN(end);
        if (start != end) {
                smp_cross_call(&xcall_flush_tlb_kernel_range,
                               0, start, end);

                __flush_tlb_kernel_range(start, end);
        }
}
/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
void smp_capture(void)
{
        int result = atomic_add_ret(1, &smp_capture_depth);

        if (result == 1) {
                int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
                printk("CPU[%d]: Sending penguins to jail...",
                       smp_processor_id());
#endif
                penguins_are_doing_time = 1;
                membar_storestore_loadstore();
                atomic_inc(&smp_capture_registry);
                smp_cross_call(&xcall_capture, 0, 0, 0);
                while (atomic_read(&smp_capture_registry) != ncpus)
                        rmb();
#ifdef CAPTURE_DEBUG
                printk("done\n");
#endif
        }
}
void smp_release(void)
{
        if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
                printk("CPU[%d]: Giving pardon to "
                       "imprisoned penguins\n",
                       smp_processor_id());
#endif
                penguins_are_doing_time = 0;
                membar_storeload_storestore();
                atomic_dec(&smp_capture_registry);
        }
}
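/* Usage pattern, sketched for reference (an added note; the caller named
 * here is illustrative): code that must be the only thing running, such as
 * a path that makes the PROM active, brackets its critical region with
 *
 *      smp_capture();
 *      ... all other cpus now spin in smp_penguin_jailcell() ...
 *      smp_release();
 *
 * and nesting is safe because smp_capture_depth counts recursive captures.
 */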
/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
        clear_softint(1 << irq);

        preempt_disable();

        __asm__ __volatile__("flushw");
        prom_world(1);
        atomic_inc(&smp_capture_registry);
        membar_storeload_storestore();
        while (penguins_are_doing_time)
                rmb();
        atomic_dec(&smp_capture_registry);
        prom_world(0);

        preempt_enable();
}
#define prof_multiplier(__cpu)          cpu_data(__cpu).multiplier
#define prof_counter(__cpu)             cpu_data(__cpu).counter

void smp_percpu_timer_interrupt(struct pt_regs *regs)
{
        unsigned long compare, tick, pstate;
        int cpu = smp_processor_id();
        int user = user_mode(regs);
        struct pt_regs *old_regs;

        /*
         * Check for level 14 softint.
         */
        {
                unsigned long tick_mask = tick_ops->softint_mask;

                if (!(get_softint() & tick_mask)) {
                        extern void handler_irq(int, struct pt_regs *);

                        handler_irq(14, regs);
                        return;
                }
                clear_softint(tick_mask);
        }

        old_regs = set_irq_regs(regs);
        do {
                profile_tick(CPU_PROFILING);
                if (!--prof_counter(cpu)) {
                        irq_enter();

                        if (cpu == boot_cpu_id) {
                                kstat_this_cpu.irqs[0]++;
                                timer_tick_interrupt(regs);
                        }

                        update_process_times(user);

                        irq_exit();

                        prof_counter(cpu) = prof_multiplier(cpu);
                }

                /* Guarantee that the following sequences execute
                 * uninterrupted.
                 */
                __asm__ __volatile__("rdpr %%pstate, %0\n\t"
                                     "wrpr %0, %1, %%pstate"
                                     : "=r" (pstate)
                                     : "i" (PSTATE_IE));

                compare = tick_ops->add_compare(current_tick_offset);
                tick = tick_ops->get_tick();

                /* Restore PSTATE_IE. */
                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                     : /* no outputs */
                                     : "r" (pstate));
        } while (time_after_eq(tick, compare));
        set_irq_regs(old_regs);
}
static void __init smp_setup_percpu_timer(void)
{
        int cpu = smp_processor_id();
        unsigned long pstate;

        prof_counter(cpu) = prof_multiplier(cpu) = 1;

        /* Guarantee that the following sequences execute
         * uninterrupted.
         */
        __asm__ __volatile__("rdpr %%pstate, %0\n\t"
                             "wrpr %0, %1, %%pstate"
                             : "=r" (pstate)
                             : "i" (PSTATE_IE));

        tick_ops->init_tick(current_tick_offset);

        /* Restore PSTATE_IE. */
        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                             : /* no outputs */
                             : "r" (pstate));
}
void __init smp_tick_init(void)
{
        boot_cpu_id = hard_smp_processor_id();
        current_tick_offset = timer_tick_offset;

        prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
        unsigned long flags;
        int i;

        if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
                return -EINVAL;

        spin_lock_irqsave(&prof_setup_lock, flags);
        for_each_possible_cpu(i)
                prof_multiplier(i) = multiplier;
        current_tick_offset = (timer_tick_offset / multiplier);
        spin_unlock_irqrestore(&prof_setup_lock, flags);

        return 0;
}
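/* Effect of the multiplier, worked through with illustrative numbers (an
 * added note): with multiplier == 4, current_tick_offset shrinks to a
 * quarter of timer_tick_offset, so the %tick compare fires four times per
 * original tick, while prof_counter (reloaded from prof_multiplier) lets
 * update_process_times() run only once every four interrupts -- profiling
 * samples get finer without changing timekeeping.
 */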
static void __init smp_tune_scheduling(void)
{
        struct device_node *dp;
        int instance;
        unsigned int def, smallest = ~0U;

        def = ((tlb_type == hypervisor) ?
               (3 * 1024 * 1024) :
               (4 * 1024 * 1024));

        instance = 0;
        while (!cpu_find_by_instance(instance, &dp, NULL)) {
                unsigned int val;

                val = of_getintprop_default(dp, "ecache-size", def);
                if (val < smallest)
                        smallest = val;

                instance++;
        }

        /* Any value less than 256K is nonsense.  */
        if (smallest < (256U * 1024U))
                smallest = 256 * 1024;

        max_cache_size = smallest;

        if (smallest < 1U * 1024U * 1024U)
                printk(KERN_INFO "Using max_cache_size of %uKB\n",
                       smallest / 1024U);
        else
                printk(KERN_INFO "Using max_cache_size of %uMB\n",
                       smallest / 1024U / 1024U);
}
/* Constrain the number of cpus to max_cpus.  */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int i;

        if (num_possible_cpus() > max_cpus) {
                int instance, mid;

                instance = 0;
                while (!cpu_find_by_instance(instance, NULL, &mid)) {
                        if (mid != boot_cpu_id) {
                                cpu_clear(mid, phys_cpu_present_map);
                                cpu_clear(mid, cpu_present_map);
                                if (num_possible_cpus() <= max_cpus)
                                        break;
                        }
                        instance++;
                }
        }

        for_each_possible_cpu(i) {
                if (tlb_type == hypervisor) {
                        int j;

                        /* XXX get this mapping from machine description */
                        for_each_possible_cpu(j) {
                                if ((j >> 2) == (i >> 2))
                                        cpu_set(j, cpu_sibling_map[i]);
                        }
                } else {
                        cpu_set(i, cpu_sibling_map[i]);
                }
        }

        smp_store_cpu_info(boot_cpu_id);
        smp_tune_scheduling();
}
/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
        int instance, mid;

        instance = 0;
        while (!cpu_find_by_instance(instance, NULL, &mid)) {
                if (mid < NR_CPUS) {
                        cpu_set(mid, phys_cpu_present_map);
                        cpu_set(mid, cpu_present_map);
                }
                instance++;
        }
}
void __devinit smp_prepare_boot_cpu(void)
{
}
int __cpuinit __cpu_up(unsigned int cpu)
{
        int ret = smp_boot_one_cpu(cpu);

        if (!ret) {
                cpu_set(cpu, smp_commenced_mask);
                while (!cpu_isset(cpu, cpu_online_map))
                        mb();
                if (!cpu_isset(cpu, cpu_online_map)) {
                        ret = -ENODEV;
                } else {
                        /* On SUN4V, writes to %tick and %stick are
                         * not allowed.
                         */
                        if (tlb_type != hypervisor)
                                smp_synchronize_one_tick(cpu);
                }
        }
        return ret;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        int i;

        for_each_online_cpu(i)
                bogosum += cpu_data(i).udelay_val;
        printk("Total of %ld processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               (long) num_online_cpus(),
               bogosum/(500000/HZ),
               (bogosum/(5000/HZ))%100);
}
void smp_send_reschedule(int cpu)
{
        smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}
unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);
void __init setup_per_cpu_areas(void)
{
        unsigned long goal, size, i;
        char *ptr;

        /* Copy section for each CPU (we discard the original) */
        goal = PERCPU_ENOUGH_ROOM;

        __per_cpu_shift = 0;
        for (size = 1UL; size < goal; size <<= 1UL)
                __per_cpu_shift++;

        ptr = alloc_bootmem(size * NR_CPUS);

        __per_cpu_base = ptr - __per_cpu_start;

        for (i = 0; i < NR_CPUS; i++, ptr += size)
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

        /* Setup %g5 for the boot cpu.  */
        __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}
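/* Sizing note (an added explanatory sketch): the loop above rounds the
 * per-cpu area up to the next power of two, so a cpu's offset can be
 * formed with a shift by __per_cpu_shift instead of a multiply.  For
 * example, if PERCPU_ENOUGH_ROOM were 40KB, size would become 64KB and
 * __per_cpu_shift would end up 16.
 */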