/*
 * $Id: smp.c,v 1.54 1999/06/24 17:13:34 cort Exp $
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Support for PReP (Motorola MTX/MVME) SMP by Troy Benjegerdes
 * (troy@microux.com, hozer@drgw.net)
 */
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/tasks.h>
16 #include <linux/smp.h>
17 #include <linux/smp_lock.h>
18 #include <linux/interrupt.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/delay.h>
21 #define __KERNEL_SYSCALLS__
22 #include <linux/unistd.h>
23 #include <linux/init.h>
24 #include <linux/openpic.h>
26 #include <asm/ptrace.h>
27 #include <asm/atomic.h>
30 #include <asm/pgtable.h>
31 #include <asm/spinlock.h>
32 #include <asm/hardirq.h>
33 #include <asm/softirq.h>
/*
 * SMP bring-up state shared between the boot (master) CPU and the
 * secondaries.
 * NOTE(review): this extraction is line-mangled (statements split across
 * lines, stale line numbers fused in); the code text is left untouched.
 */
/* Set by smp_boot_cpus() so other processors skip one-time init. */
39 int first_cpu_booted
= 0;
/* Presumably nonzero once all secondary idle threads exist --
   not written anywhere in this chunk; TODO confirm. */
40 int smp_threads_ready
= 0;
/* Release flag: secondaries spin in smp_callin() until this goes nonzero. */
41 volatile int smp_commenced
= 0;
/* Per-CPU information, filled in by smp_store_cpu_info(). */
43 struct cpuinfo_PPC cpu_data
[NR_CPUS
];
/* Pre-2.4 big-kernel-lock bookkeeping -- see active_kernel_processor. */
44 struct klock_info_struct klock_info
= { KLOCK_CLEAR
, 0 };
45 volatile unsigned char active_kernel_processor
= NO_PROC_ID
; /* Processor holding kernel spinlock */
/* Count of inter-processor interrupts received (statistics only,
   as far as this chunk shows). */
46 volatile unsigned long ipi_count
;
47 spinlock_t kernel_flag
= SPIN_LOCK_UNLOCKED
;
/* Profiling tick dividers, one per CPU; reloaded by
   smp_local_timer_interrupt(), adjustable via setup_profiling_timer(). */
48 unsigned int prof_multiplier
[NR_CPUS
];
49 unsigned int prof_counter
[NR_CPUS
];
/* Rough cache-flush cost estimate, set in smp_boot_cpus(). */
50 cycles_t cacheflush_time
;
52 /* all cpu mappings are 1-1 -- Cort */
53 int cpu_number_map
[NR_CPUS
] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,};
/* Each secondary sets its slot to 1 when it reaches smp_callin();
   smp_boot_cpus() polls this to see whether a CPU actually started. */
54 volatile unsigned long cpu_callin_map
[NR_CPUS
] = {0,};
56 int start_secondary(void *);
57 extern int cpu_idle(void *unused
);
58 u_int
openpic_read(volatile u_int
*addr
);
60 /* register for interrupting the secondary processor on the powersurge */
61 #define PSURGE_INTR ((volatile unsigned *)0xf80000c0)
/*
 * Per-CPU timer tick: charge the current process's user/system time to
 * the kstat counters and drive the per-CPU profiling divider.
 * NOTE(review): several original lines are missing from this extraction,
 * including the derivation of `user' and `system' and some braces; the
 * code text below is left untouched.
 */
63 void smp_local_timer_interrupt(struct pt_regs
* regs
)
65 int cpu
= smp_processor_id();
66 extern void update_one_process(struct task_struct
*,unsigned long,
67 unsigned long,unsigned long,int);
/* Only do full accounting once every prof_multiplier[cpu] ticks. */
68 if (!--prof_counter
[cpu
]) {
70 struct task_struct
* p
= current
;
73 * After doing the above, we need to make like
74 * a normal interrupt - otherwise timer interrupts
75 * ignore the global interrupt lock, which is the
76 * WrongThing (tm) to do.
/* NOTE(review): `user' and `system' are computed on lines dropped
   from this extraction -- presumably from regs/tick as in the x86
   version; verify against the full file. */
85 update_one_process(p
, 1, user
, system
, cpu
);
/* Time slice exhausted: request a reschedule on this CPU. */
88 if (p
->counter
<= 0) {
90 current
->need_resched
= 1;
/* Priority below DEF_PRIORITY is accounted as "nice" time. */
92 if (p
->priority
< DEF_PRIORITY
) {
93 kstat
.cpu_nice
+= user
;
94 kstat
.per_cpu_nice
[cpu
] += user
;
96 kstat
.cpu_user
+= user
;
97 kstat
.per_cpu_user
[cpu
] += user
;
100 kstat
.cpu_system
+= system
;
101 kstat
.per_cpu_system
[cpu
] += system
;
/* Reload the profiling divider for the next accounting period. */
104 prof_counter
[cpu
]=prof_multiplier
[cpu
];
/*
 * Dirty hack to get SMP message passing working.
 *
 * As it is now, if we're sending two messages at the same time
 * we have race conditions.  The PowerSurge doesn't easily
 * allow us to send IPI messages, so we put the messages in
 * the smp_message[] mailbox below.
 *
 * This is because we don't have several IPIs on the PowerSurge even though
 * we do on the chrp.  It would be nice to use the actual IPIs on the chrp
 * rather than this, but having two methods of doing IPI isn't a good idea.
 */
/* Per-CPU message mailbox; -1 means "empty" (see smp_message_recv(),
   which consumes a slot and resets it to -1). */
122 int smp_message
[NR_CPUS
];
/*
 * Handle an incoming inter-processor message: read this CPU's slot of
 * smp_message[], ack the PowerSurge interrupt line on Pmac, act on the
 * message, then mark the slot empty (-1).
 * NOTE(review): the switch(msg) header and several case bodies are
 * missing from this extraction; code text is left untouched.
 */
123 void smp_message_recv(void)
125 int msg
= smp_message
[smp_processor_id()];
/* On PowerSurge the IPI is a level on PSURGE_INTR -- ack it first. */
127 if ( _machine
== _MACH_Pmac
)
129 /* clear interrupt */
130 out_be32(PSURGE_INTR
, ~0);
133 /* make sure msg is for us */
134 if ( msg
== -1 ) return;
/* Presumably the MSG_RESCHEDULE case -- switch header dropped here. */
145 current
->need_resched
= 1;
147 case 0xf0f0: /* syncing time bases - just return */
/* Unknown message: log it rather than acting on garbage. */
150 printk("SMP %d: smp_message_recv(): unknown msg %d\n",
151 smp_processor_id(), msg
);
/* Mark our mailbox slot empty again. */
155 smp_message
[smp_processor_id()] = -1;
/*
 * Ask `cpu' to reschedule.  Deliberately a no-op on this platform:
 * this hook is only invoked when `cpu' is running its idle task,
 * which will reschedule on its own, so no IPI needs to be sent.
 */
void smp_send_reschedule(int cpu)
{
	/*smp_message_pass(cpu, MSG_RESCHEDULE, 0, 0);*/
}
165 void smp_send_stop(void)
167 smp_message_pass(MSG_ALL_BUT_SELF
, MSG_STOP_CPU
, 0, 0);
/* Serializes writers of smp_message[] inside smp_message_pass(). */
170 spinlock_t mesg_pass_lock
= SPIN_LOCK_UNLOCKED
;
/*
 * Send `msg' to `target' (a CPU number or MSG_ALL_BUT_SELF) by writing
 * the per-CPU smp_message[] mailbox and then kicking the recipient(s):
 * a PSURGE_INTR pulse on PowerSurge Pmacs, an OpenPIC IPI on chrp.
 * `data' and `wait' are accepted for interface compatibility; nothing
 * visible here uses them.
 * NOTE(review): the switch(target) headers, some case labels, and the
 * early-return paths are missing from this extraction; code untouched.
 */
171 void smp_message_pass(int target
, int msg
, unsigned long data
, int wait
)
/* Only Pmac and chrp have a working IPI mechanism here. */
174 if ( !(_machine
& (_MACH_Pmac
|_MACH_chrp
)) )
/* One sender at a time -- the mailbox has no per-slot locking. */
177 spin_lock(&mesg_pass_lock
);
180 * We assume here that the msg is not -1. If it is,
181 * the recipient won't know the message was destined
/* Record the message; note our own slot is used for the MSG_ALL
   case (header dropped from this extraction). */
188 smp_message
[smp_processor_id()] = msg
;
190 case MSG_ALL_BUT_SELF
:
191 for ( i
= 0 ; i
< smp_num_cpus
; i
++ )
192 if ( i
!= smp_processor_id () )
193 smp_message
[i
] = msg
;
/* default case: a single explicit target CPU. */
196 smp_message
[target
] = msg
;
/* PowerSurge: pulse the secondary's interrupt line high then low. */
200 if ( _machine
== _MACH_Pmac
)
202 /* interrupt secondary processor */
203 out_be32(PSURGE_INTR
, ~0);
204 out_be32(PSURGE_INTR
, 0);
206 * Assume for now that the secondary doesn't send
209 /* interrupt primary */
210 /**(volatile unsigned long *)(0xf3019000);*/
/* chrp: use real OpenPIC IPIs, one cause per destination set. */
213 if ( _machine
== _MACH_chrp
)
216 * There has to be some way of doing this better -
217 * perhaps a sent-to-all or send-to-all-but-self
218 * in the openpic. This gets us going for now, though.
224 for ( i
= 0 ; i
< smp_num_cpus
; i
++ )
225 openpic_cause_IPI(i
, 0, 0xffffffff );
227 case MSG_ALL_BUT_SELF
:
228 for ( i
= 0 ; i
< smp_num_cpus
; i
++ )
229 if ( i
!= smp_processor_id () )
230 openpic_cause_IPI(i
, 0,
231 0xffffffff & ~(1 << smp_processor_id()));
234 openpic_cause_IPI(target
, 0, 1U << target
);
239 spin_unlock(&mesg_pass_lock
);
/*
 * Boot the secondary CPUs from the master (CPU 0): record our own
 * state, spawn an idle thread per secondary, hand each secondary its
 * start address (PowerSurge mailbox write, or RTAS "start-cpu" on
 * chrp), then poll cpu_callin_map[] to see whether it came up.
 * NOTE(review): many original lines (braces, machine-type dispatch,
 * variable declarations, timeout udelay) are missing from this
 * extraction; the code text is left untouched.
 */
242 void __init
smp_boot_cpus(void)
244 extern struct task_struct
*current_set
[NR_CPUS
];
245 extern void __secondary_start_psurge(void);
246 extern void __secondary_start_chrp(void);
248 struct task_struct
*p
;
251 printk("Entering SMP Mode...\n");
252 /* let other processors know to not do certain initialization */
253 first_cpu_booted
= 1;
257 * assume for now that the first cpu booted is
258 * cpu 0, the master -- Cort
/* The master is already "called in" by definition. */
260 cpu_callin_map
[0] = 1;
261 smp_store_cpu_info(0);
262 active_kernel_processor
= 0;
263 current
->processor
= 0;
/* Default the profiling divider to 1 on every CPU. */
265 for (i
= 0; i
< NR_CPUS
; i
++) {
267 prof_multiplier
[i
] = 1;
271 * XXX very rough, assumes 20 bus cycles to read a cache line,
272 * timebase increments every 4 bus cycles, 32kB L1 data cache.
274 cacheflush_time
= 5 * 1024;
276 if ( !(_machine
& (_MACH_Pmac
|_MACH_chrp
)) )
278 printk("SMP not supported on this machine.\n");
285 /* assume powersurge board - 2 processors -- Cort */
/* chrp: ask the OpenPIC how many processors exist. */
289 cpu_nr
= ((openpic_read(&OpenPIC
->Global
.Feature_Reporting0
)
290 & OPENPIC_FEATURE_LAST_PROCESSOR_MASK
) >>
291 OPENPIC_FEATURE_LAST_PROCESSOR_SHIFT
)+1;
296 * only check for cpus we know exist. We keep the callin map
297 * with cpus at the bottom -- Cort
299 for ( i
= 1 ; i
< cpu_nr
; i
++ )
303 /* create a process for the processor */
304 kernel_thread(start_secondary
, NULL
, CLONE_PID
);
307 panic("No idle task for secondary processor\n");
312 /* need to flush here since secondary bats aren't setup */
/* Flush the first 8MB of the kernel image out of the d-cache,
   one 32-byte line at a time, so the secondary sees it. */
313 for (a
= KERNELBASE
; a
< KERNELBASE
+ 0x800000; a
+= 32)
314 asm volatile("dcbf 0,%0" : : "r" (a
) : "memory");
315 asm volatile("sync");
321 /* setup entry point of secondary processor */
/* PowerSurge: physical address of the secondary's entry point goes
   in a magic mailbox location. */
322 *(volatile unsigned long *)(0xf2800000) =
323 (unsigned long)__secondary_start_psurge
-KERNELBASE
;
325 /* interrupt secondary to begin executing code */
326 out_be32(PSURGE_INTR
, ~0);
327 out_be32(PSURGE_INTR
, 0);
/* chrp path: pass the CPU number via the word at KERNELBASE and
   flush it so the secondary reads the new value. */
330 *(unsigned long *)KERNELBASE
= i
;
331 asm volatile("dcbf 0,%0"::"r"(KERNELBASE
):"memory");
333 device
= find_type_devices("cpu");
334 /* assume cpu device list is in order, find the ith cpu */
335 for ( a
= i
; device
&& a
; device
= device
->next
, a
-- )
339 printk( "Starting %s (%lu): ", device
->full_name
,
340 *(ulong
*)get_property(device
, "reg", NULL
) );
/* Fire the secondary through RTAS at __secondary_start_chrp. */
341 call_rtas( "start-cpu", 3, 1, NULL
,
342 *(ulong
*)get_property(device
, "reg", NULL
),
343 __pa(__secondary_start_chrp
), i
);
349 * wait to see if the cpu made a callin (is actually up).
350 * use this value that I found through experimentation.
/* Bounded poll of the callin map -- delay body dropped from this
   extraction. */
353 for ( c
= 1000; c
&& !cpu_callin_map
[i
] ; c
-- )
356 if ( cpu_callin_map
[i
] )
358 printk("Processor %d found.\n", i
);
359 /* this sync's the decr's -- Cort */
360 if ( _machine
== _MACH_Pmac
)
361 set_dec(decrementer_count
);
364 printk("Processor %d is stuck.\n", i
);
/* PowerSurge: park the mailbox at 0x100 (reset vector) so a stray
   interrupt won't restart the secondary. */
368 if ( _machine
== _MACH_Pmac
)
370 /* reset the entry point so if we get another intr we won't
371 * try to startup again */
372 *(volatile unsigned long *)(0xf2800000) = 0x100;
373 /* send interrupt to other processors to start decr's on all cpus */
374 smp_message_pass(1,0xf0f0, 0, 0);
/*
 * Release the secondaries from their smp_callin() spin loop.
 * NOTE(review): the body is missing from this extraction -- presumably
 * it sets smp_commenced = 1 (see the while(!smp_commenced) wait in
 * smp_callin()); confirm against the full file.
 */
378 void __init
smp_commence(void)
381 * Lets the callin's below out of their loop.
/*
 * Stub kept only because the architecture-independent code expects it
 * ("intel needs this"); nothing visible here does any work.
 * NOTE(review): body lines, if any, are missing from this extraction.
 */
386 /* intel needs this */
387 void __init
initialize_secondary(void)
/*
 * Entry point of the per-secondary kernel thread: ends up running the
 * idle loop forever.
 * NOTE(review): the lines between the signature and the return are
 * missing from this extraction -- likely a call to smp_callin();
 * confirm against the full file.
 */
391 /* Activate a secondary processor. */
392 asmlinkage
int __init
start_secondary(void *unused
)
395 return cpu_idle(NULL
);
/*
 * First C-level setup run on a secondary CPU: record its cpu_data,
 * start its decrementer, patch the initial mm's first vma to cover
 * the kernel mapping, announce arrival in cpu_callin_map[], then spin
 * until smp_commence() raises smp_commenced.
 * NOTE(review): braces and the spin-loop body are missing from this
 * extraction; code text left untouched.
 */
398 void __init
smp_callin(void)
400 smp_store_cpu_info(current
->processor
);
401 set_dec(decrementer_count
);
/* Make the idle task's first vma span the kernel: PAGE_OFFSET up to
   init_task's mapping end, shared protection. */
404 current
->mm
->mmap
->vm_page_prot
= PAGE_SHARED
;
405 current
->mm
->mmap
->vm_start
= PAGE_OFFSET
;
406 current
->mm
->mmap
->vm_end
= init_task
.mm
->mmap
->vm_end
;
/* Tell smp_boot_cpus() we made it. */
408 cpu_callin_map
[current
->processor
] = 1;
/* Wait here until smp_commence() releases us. */
409 while(!smp_commenced
)
/*
 * "smp=" boot-option hook.
 * NOTE(review): no body is visible in this extraction -- appears to be
 * an empty stub on this architecture; confirm against the full file.
 */
414 void __init
smp_setup(char *str
, int *ints
)
/*
 * Change the profiling tick rate (multiplier goes into
 * prof_multiplier[], consumed by smp_local_timer_interrupt()).
 * NOTE(review): the body and return statement are missing from this
 * extraction; confirm behavior against the full file.
 */
418 int __init
setup_profiling_timer(unsigned int multiplier
)
/*
 * Record per-CPU information into cpu_data[id].  Only the bogomips
 * copy is visible here; the function appears to continue past the end
 * of this extraction -- NOTE(review): check the full file for further
 * fields being filled in.
 */
423 void __init
smp_store_cpu_info(int id
)
425 struct cpuinfo_PPC
*c
= &cpu_data
[id
];
427 /* assume bogomips are same for everything */
/* Master's calibrated loops_per_sec is assumed valid for all CPUs. */
428 c
->loops_per_sec
= loops_per_sec
;