/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};

cpumask_t smp_commenced_mask = CPU_MASK_NONE;
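/* A secondary CPU flags its slot in cpu_callin_map once its low-level
 * bring-up is complete; the boot CPU then sets the corresponding bit in
 * smp_commenced_mask to let the secondary continue into the idle loop.
 */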
/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
 * places the current byte at the effective address into dest_reg and
 * places 0xff there afterwards.  Pretty lame locking primitive
 * compared to the Alpha and the Intel no?  Most Sparcs have 'swap'
 * instruction which is much better...
 */
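/* Purely illustrative and compiled out: a minimal sketch of the
 * test-and-set spinlock that 'ldstub' makes possible.  The helper name
 * and loop shape are illustrative assumptions, not the kernel's actual
 * spinlock implementation.
 */
#if 0
static inline void example_ldstub_lock(volatile unsigned char *lock)
{
	unsigned char tmp;

	do {
		/* Atomically fetch the lock byte and store 0xff in its
		 * place; reading back zero means we now own the lock.
		 */
		__asm__ __volatile__("ldstub	[%1], %0"
				     : "=r" (tmp)
				     : "r" (lock)
				     : "memory");
	} while (tmp != 0);
}
#endif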
void __cpuinit smp_store_cpu_info(int id)
{
	int cpu_node;

	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);
	cpu_data(id).prom_node = cpu_node;
	cpu_data(id).mid = cpu_get_hwmid(cpu_node);

	if (cpu_data(id).mid < 0)
		panic("No MID found for CPU%d at node 0x%08x", id, cpu_node);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	extern void smp4m_smp_done(void);
	extern void smp4d_smp_done(void);
	unsigned long bogosum = 0;
	int cpu, num = 0;

	for_each_online_cpu(cpu) {
		num++;
		bogosum += cpu_data(cpu).udelay_val;
	}

	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       num, bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);

	switch(sparc_cpu_model) {
	case sun4m:
		smp4m_smp_done();
		break;
	case sun4d:
		smp4d_smp_done();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}
118 printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
119 panic("SMP bolixed\n");
struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };
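/* BTFIXUP_CALL() dispatches through a function pointer that is patched
 * at boot time with the implementation for the machine we are actually
 * running on, so the generic code below stays CPU-model independent.
 */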
void smp_send_reschedule(int cpu)
{
	/*
	 * CPU model dependent way of implementing IPI generation targeting
	 * a single CPU. The trap handler needs only to do trap entry/return
	 * to call schedule.
	 */
	BTFIXUP_CALL(smp_ipi_resched)(cpu);
}
void smp_send_stop(void)
{
}
void arch_send_call_function_single_ipi(int cpu)
{
	/* trigger one IPI single call on one CPU */
	BTFIXUP_CALL(smp_ipi_single)(cpu);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	/* trigger IPI mask call on each CPU */
	for_each_cpu(cpu, mask)
		BTFIXUP_CALL(smp_ipi_mask_one)(cpu);
}
void smp_resched_interrupt(void)
{
	local_cpu_data().irq_resched_count++;
	/*
	 * do nothing, since it all was about calling the re-schedule
	 * routine called by interrupt return code.
	 */
}
void smp_call_function_single_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}
void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}
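/* The xcN() helpers below cross-call a function on the other online
 * CPUs; the suffix is the number of arguments shipped along with the
 * function pointer (xc0 none, xc1 one, and so on).
 */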
void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
	local_flush_cache_all();
}
void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
	local_flush_tlb_all();
}
void smp_flush_cache_mm(struct mm_struct *mm)
{
	if(mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
		local_flush_cache_mm(mm);
	}
}
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if(mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask)) {
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
			if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				cpumask_copy(mm_cpumask(mm),
					     cpumask_of(smp_processor_id()));
		}
		local_flush_tlb_mm(mm);
	}
}
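/* Note the trimming above: once a single-user mm has been flushed on
 * the other CPUs, mm_cpumask() is reduced to the local CPU so that
 * subsequent flushes for this mm can skip the cross-call entirely.
 */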
void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
		local_flush_cache_range(vma, start, end);
	}
}
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
		local_flush_tlb_range(vma, start, end);
	}
}
void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if(mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
		local_flush_cache_page(vma, page);
	}
}
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if(mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
		local_flush_tlb_page(vma, page);
	}
}
void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the ones
	 * who have just dirtied their cache with the page's contents
	 * in kernel space, therefore we only run this on the local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#endif
	local_flush_page_to_ram(page);
}
void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask;

	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	if (!cpumask_empty(&cpu_mask))
		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
	local_flush_sig_insns(mm, insn_addr);
}
extern unsigned int lvl14_resolution;

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);
int setup_profiling_timer(unsigned int multiplier)
{
	int i;
	unsigned long flags;

	/* Prevent level14 ticker IRQ flooding. */
	if((!multiplier) || (lvl14_resolution / multiplier) < 500)
		return -EINVAL;

	spin_lock_irqsave(&prof_setup_lock, flags);
	for_each_possible_cpu(i) {
		load_profile_irq(i, lvl14_resolution / multiplier);
		prof_multiplier(i) = multiplier;
	}
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	extern void __init smp4m_boot_cpus(void);
	extern void __init smp4d_boot_cpus(void);
	int i, cpuid, extra;

	printk("Entering SMP Mode...\n");

	extra = 0;
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid >= NR_CPUS)
			extra++;
	}
	/* i = number of cpus */
	if (extra && max_cpus > i - extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);

	switch(sparc_cpu_model) {
	case sun4m:
		smp4m_boot_cpus();
		break;
	case sun4d:
		smp4d_boot_cpus();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}
/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS) {
			set_cpu_possible(mid, true);
			set_cpu_present(mid, true);
		}
		instance++;
	}
}
void __init smp_prepare_boot_cpu(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;
	set_cpu_online(cpuid, true);
	set_cpu_possible(cpuid, true);
}
int __cpuinit __cpu_up(unsigned int cpu)
{
	extern int __cpuinit smp4m_boot_one_cpu(int);
	extern int __cpuinit smp4d_boot_one_cpu(int);
	int ret = 0;

	switch(sparc_cpu_model) {
	case sun4m:
		ret = smp4m_boot_one_cpu(cpu);
		break;
	case sun4d:
		ret = smp4d_boot_one_cpu(cpu);
		break;
	case sparc_leon:
		ret = leon_boot_one_cpu(cpu);
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);

		/* Wait until the freshly booted CPU marks itself online. */
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}
void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i) {
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n",
			   i,
			   cpu_data(i).udelay_val/(500000/HZ),
			   (cpu_data(i).udelay_val/(5000/HZ))%100);
	}
}
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d\t\t: online\n", i);
}